Merge pull request #38 from Saran-nns/dev
Concurrent execution of plasticity rules using threadpool executor
Saran-nns authored Nov 30, 2021
2 parents d6ca8af + 29ce32d commit c59739c
Showing 2 changed files with 93 additions and 123 deletions.
195 changes: 82 additions & 113 deletions sorn/sorn.py
@@ -4,15 +4,17 @@
import numpy as np
import os
import random
import concurrent.futures

try:
from sorn.utils import Initializer
except ImportError:
from utils import Initializer


class Sorn(object):

""" This class wraps initialization of the network and its parameters"""
"""This class wraps initialization of the network and its parameters"""

nu = 10
ne = 200
@@ -34,7 +36,8 @@ class Sorn(object):
lambda_ie = 100

@staticmethod
def initialize_weight_matrix(network_type: str, synaptic_connection: str, self_connection: str, lambd_w: int
def initialize_weight_matrix(
network_type: str, synaptic_connection: str, self_connection: str, lambd_w: int
):
"""Wrapper for initializing the weight matrices for SORN
@@ -54,7 +57,9 @@ def initialize_weight_matrix(network_type: str, synaptic_connection: str, self_c
if (network_type == "Sparse") and (self_connection == "False"):

# Generate weight matrix for E-E / E-I connections with mean lambda incoming and outgoing connections per neuron
assert (lambd_w <= Sorn.ne), "Number of connections per unit (lambda) should be less than number of units(Ne) in the pool and also Ne should be greater than 25"
assert (
lambd_w <= Sorn.ne
), "Number of connections per unit (lambda) should be less than number of units(Ne) in the pool and also Ne should be greater than 25"
weight_matrix = Initializer.generate_lambd_connections(
synaptic_connection, Sorn.ne, Sorn.ni, lambd_w, lambd_std=1
)
@@ -69,7 +74,9 @@ def initialize_weight_matrix(network_type: str, synaptic_connection: str, self_c
return weight_matrix

@staticmethod
def initialize_threshold_matrix(te_min: float, te_max: float, ti_min: float, ti_max: float):
def initialize_threshold_matrix(
te_min: float, te_max: float, ti_min: float, ti_max: float
):
"""Initialize the threshold for excitatory and inhibitory neurons
Args:
@@ -105,6 +112,7 @@ def initialize_activity_vector(ne: int, ni: int):

return x, y


class Plasticity(Sorn):
"""Instance of class Sorn. Inherits the variables and functions defined in class Sorn.
It encapsulates all plasticity mechanisms mentioned in the article and inherits all attributes from the parent class Sorn
@@ -162,8 +170,7 @@ def stdp(self, wee: np.array, x: np.array, cutoff_weights: list):
if wee_t[j][i] != 0.0: # Check connectivity

# Get the change in weight
delta_wee_t = self.eta_stdp * \
(xt[i] * xt_1[j] - xt_1[i] * xt[j])
delta_wee_t = self.eta_stdp * (xt[i] * xt_1[j] - xt_1[i] * xt[j])

# Update the weight from the jth neuron to the ith neuron (differs from the notation in the article)

@@ -253,8 +260,7 @@ def istdp(self, wei: np.array, x: np.array, y: np.array, cutoff_weights: list):

# Get the change in weight
delta_wei_t = (
-self.eta_inhib * yt_1[j] *
(1 - xt[i] * (1 + 1 / self.mu_ip))
-self.eta_inhib * yt_1[j] * (1 - xt[i] * (1 + 1 / self.mu_ip))
)

# Update the weight from the jth neuron to the ith neuron (differs from the notation in the article)
@@ -305,7 +311,7 @@ def initialize_plasticity():
kwargs (self.__dict__): All arguments are inherited from Sorn attributes
Returns:
tuple(array): Weight matrices WEI, WEE, WIE and threshold matrices Te, Ti and Initial state vectors X,Y """
tuple(array): Weight matrices WEI, WEE, WIE and threshold matrices Te, Ti and Initial state vectors X,Y"""

sorn_init = Sorn()
WEE_init = sorn_init.initialize_weight_matrix(
@@ -352,8 +358,7 @@ def initialize_plasticity():
te_init, ti_init = sorn_init.initialize_threshold_matrix(
Sorn.te_min, Sorn.te_max, Sorn.ti_min, Sorn.ti_max
)
x_init, y_init = sorn_init.initialize_activity_vector(
Sorn.ne, Sorn.ni)
x_init, y_init = sorn_init.initialize_activity_vector(Sorn.ne, Sorn.ni)

# Initializing variables from sorn_initialize.py

@@ -368,6 +373,47 @@ def initialize_plasticity():
return wee, wei, wie, te, ti, x, y


class Async:
def __init__(self, max_workers=4):
super().__init__()
self.max_workers = max_workers
self.plasticity = Plasticity()

def step(self, X, Y, Wee, Wei, Te, freeze):

with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_workers
) as executor:

# STDP
if "stdp" not in freeze:
stdp = executor.submit(
self.plasticity.stdp, Wee, X, cutoff_weights=(0.0, 1.0)
)
Wee = stdp.result()

# Intrinsic plasticity
if "ip" not in freeze:
ip = executor.submit(self.plasticity.ip, Te, X)
Te = ip.result()
# Structural plasticity
if "sp" not in freeze:
sp = executor.submit(self.plasticity.structural_plasticity, Wee)
Wee = sp.result()
# iSTDP
if "istdp" not in freeze:
istdp = executor.submit(
self.plasticity.istdp, Wei, X, Y, cutoff_weights=(0.0, 1.0)
)
Wei = istdp.result()

# Synaptic scaling Wee
if "ss" not in freeze:
Wee = self.plasticity.ss(Wee)
Wei = self.plasticity.ss(Wei)
return Wee, Wei, Te


class MatrixCollection(Sorn):
"""Collect all matrices initialized and updated during simulation (plasiticity and training phases)
@@ -736,6 +782,7 @@ def simulate_sorn(
time_steps: int = None,
noise: bool = True,
freeze: list = None,
max_workers: int = 4,
**kwargs
):
"""Simulation/Plasticity phase
@@ -753,6 +800,8 @@
freeze (list, optional): List of synaptic plasticity mechanisms which will be turned off during simulation. Defaults to None.
max_workers (int, optional): Maximum number of workers for multithreading the plasticity steps. Defaults to 4.
Returns:
plastic_matrices (dict): Network states, connections and threshold matrices
@@ -773,7 +822,7 @@
self.phase = phase
self.matrices = matrices
self.freeze = [] if freeze is None else freeze

self.max_workers = max_workers
kwargs_ = [
"ne",
"nu",
@@ -800,8 +849,7 @@
Sorn.ni = int(0.2 * Sorn.ne)

# Initialize/Get the weight, threshold matrices and activity vectors
matrix_collection = MatrixCollection(
phase=self.phase, matrices=self.matrices)
matrix_collection = MatrixCollection(phase=self.phase, matrices=self.matrices)

# Collect the network activity at all time steps

@@ -824,16 +872,10 @@
else:
white_noise_e, white_noise_i = 0.0, 0.0

network_state = NetworkState(
inputs[:, i]
)
network_state = NetworkState(inputs[:, i])

# Buffers to get the resulting x and y vectors at the current time step and update the master matrix
x_buffer, y_buffer = np.zeros(
(Sorn.ne, 2)), np.zeros((Sorn.ni, 2))

te_buffer, ti_buffer = np.zeros(
(Sorn.ne, 1)), np.zeros((Sorn.ni, 1))
x_buffer, y_buffer = np.zeros((Sorn.ne, 2)), np.zeros((Sorn.ni, 2))

Wee, Wei, Wie = (
matrix_collection.Wee,
@@ -869,33 +911,9 @@
y_buffer[:, 0] = Y[i][:, 1]
y_buffer[:, 1] = inhibitory_state_yt_buffer.T

# Plasticity phase
plasticity = Plasticity()

# STDP
if 'stdp' not in self.freeze:
Wee[i] = plasticity.stdp(
Wee[i], x_buffer, cutoff_weights=(0.0, 1.0))

# Intrinsic plasticity
if 'ip' not in self.freeze:
Te[i] = plasticity.ip(Te[i], x_buffer)

# Structural plasticity
if 'sp' not in self.freeze:
Wee[i] = plasticity.structural_plasticity(Wee[i])

# iSTDP
if 'istdp' not in self.freeze:
Wei[i] = plasticity.istdp(
Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0)
)

# Synaptic scaling Wee
if 'ss' not in self.freeze:
Wee[i] = plasticity.ss(Wee[i])
Wei[i] = plasticity.ss(Wei[i])

Wee[i], Wei[i], Te[i] = Async(max_workers=max_workers).step(
x_buffer, y_buffer, Wee[i], Wei[i], Te[i], self.freeze
)
# Assign the matrices to the matrix collections
matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i)
matrix_collection.threshold_matrix(Te[i], Ti[i], i)
@@ -919,31 +937,7 @@ def simulate_sorn(


class Trainer_(Sorn):
"""Train the network with the fresh or pretrained network matrices and external stimuli
Args:
inputs (np.array, optional): External stimuli. Defaults to None.
phase (str, optional): Training phase. Defaults to "training".
matrices (dict, optional): Network states, connections and threshold matrices. Defaults to None.
time_steps (int, optional): Total number of time steps to simulate the network. Defaults to 1.
noise (bool, optional): If True, noise will be added. Defaults to True.
freeze (list, optional): List of synaptic plasticity mechanisms which will be turned off during simulation. Defaults to None.
Returns:
plastic_matrices (dict): Network states, connections and threshold matrices
X_all (array): Excitatory network activity collected during entire simulation steps
Y_all (array): Inhibitory network activity collected during entire simulation steps
R_all (array): Recurrent network activity collected during entire simulation steps
frac_pos_active_conn (list): Number of positive connection strengths in the network at each time step during simulation"""
"""Train the network with the fresh or pretrained network matrices and external stimuli"""

def __init__(self):
super().__init__()
@@ -957,11 +951,12 @@ def train_sorn(
time_steps: int = None,
noise: bool = True,
freeze: list = None,
max_workers: int = 4,
**kwargs
):
"""Train the network with the fresh or pretrained network matrices and external stimuli
Args:
Args:
inputs (np.array, optional): External stimuli. Defaults to None.
phase (str, optional): Training phase. Defaults to "training".
@@ -972,8 +967,11 @@ def train_sorn(
noise (bool, optional): If True, noise will be added. Defaults to True.
Returns:
freeze (list, optional): List of synaptic plasticity mechanisms which will be turned off during simulation. Defaults to None.
max_workers (int, optional): Maximum number of workers for multithreading the plasticity steps. Defaults to 4.
Returns:
plastic_matrices (dict): Network states, connections and threshold matrices
X_all (array): Excitatory network activity collected during entire simulation steps
@@ -1019,15 +1017,14 @@ def train_sorn(
Sorn.time_steps = time_steps
self.inputs = np.asarray(inputs)
self.freeze = [] if freeze is None else freeze

self.max_workers = max_workers
X_all = [0] * self.time_steps
Y_all = [0] * self.time_steps
R_all = [0] * self.time_steps

frac_pos_active_conn = []

matrix_collection = MatrixCollection(
phase=self.phase, matrices=self.matrices)
matrix_collection = MatrixCollection(phase=self.phase, matrices=self.matrices)

for i in range(self.time_steps):

@@ -1042,15 +1039,10 @@ def train_sorn(
white_noise_e = 0.0
white_noise_i = 0.0

network_state = NetworkState(
self.inputs[:, i]
)
network_state = NetworkState(self.inputs[:, i])

# Buffers to get the resulting x and y vectors at the current time step and update the master matrix
x_buffer, y_buffer = np.zeros(
(Sorn.ne, 2)), np.zeros((Sorn.ni, 2))
te_buffer, ti_buffer = np.zeros(
(Sorn.ne, 1)), np.zeros((Sorn.ni, 1))
x_buffer, y_buffer = np.zeros((Sorn.ne, 2)), np.zeros((Sorn.ni, 2))

Wee, Wei, Wie = (
matrix_collection.Wee,
@@ -1084,33 +1076,10 @@ def train_sorn(
y_buffer[:, 1] = inhibitory_state_yt_buffer.T

if self.phase == "plasticity":
plasticity = Plasticity()

# STDP
if 'stdp' not in self.freeze:
Wee[i] = plasticity.stdp(
Wee[i], x_buffer, cutoff_weights=(0.0, 1.0))

# Intrinsic plasticity
if 'ip' not in self.freeze:
Te[i] = plasticity.ip(Te[i], x_buffer)

# Structural plasticity
if 'sp' not in self.freeze:
Wee[i] = plasticity.structural_plasticity(Wee[i])

# iSTDP
if 'istdp' not in self.freeze:
Wei[i] = plasticity.istdp(
Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0)
)

# Synaptic scaling Wee
if 'ss' not in self.freeze:
Wee[i] = plasticity.ss(Wee[i])
Wee[i], Wei[i], Te[i] = Async(max_workers=self.max_workers).step(
x_buffer, y_buffer, Wee[i], Wei[i], Te[i], self.freeze
)

# Synaptic scaling Wei
Wei[i] = plasticity.ss(Wei[i])
else:
# Wee[i], Wei[i], Te[i] remain the same
pass
@@ -1140,4 +1109,4 @@ def train_sorn(
Trainer = Trainer_()
Simulator = Simulator_()
if __name__ == "__main__":
pass