diff --git a/.gitignore b/.gitignore index 888f5bb..86d3ad3 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,11 @@ __pycache__/ # Notebook checkpoints .ipynb_checkpoints +# PyPI build artifacts +build/ +dist/ +sorn.egg-info/ + ### VisualStudioCode ### .vscode/* !.vscode/tasks.json diff --git a/build/lib/sorn/__init__.py b/build/lib/sorn/__init__.py deleted file mode 100644 index 98fd132..0000000 --- a/build/lib/sorn/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .sorn import Simulator, Trainer -import logging -from .utils import * - -__author__ = "Saranraj Nambusubramaniyan" -__version__ = "0.6.2" - -logging.basicConfig(level=logging.INFO) diff --git a/build/lib/sorn/sorn.py b/build/lib/sorn/sorn.py deleted file mode 100644 index c23eafc..0000000 --- a/build/lib/sorn/sorn.py +++ /dev/null @@ -1,1143 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import division -import numpy as np -import os -import random - -try: - from sorn.utils import Initializer -except ImportError: - from utils import Initializer - -class Sorn(object): - - """This class wraps the initialization of the network and its parameters""" - - nu = 10 - ne = 200 - ni = int(0.2 * ne) - eta_stdp = 0.004 - eta_inhib = 0.001 - eta_ip = 0.01 - te_max = 1.0 - ti_max = 0.5 - ti_min = 0.0 - te_min = 0.0 - mu_ip = 0.1 - sigma_ip = 0.0 - network_type_ee = "Sparse" - network_type_ei = "Sparse" - network_type_ie = "Dense" - lambda_ee = 20 - lambda_ei = 40 - lambda_ie = 100 - - @staticmethod - def initialize_weight_matrix(network_type: str, synaptic_connection: str, self_connection: str, lambd_w: int - ): - """Wrapper for initializing the weight matrices for SORN - - Args: - network_type (str): Sparse or Dense - - synaptic_connection (str): EE, EI or IE. Note that Sparse connectivity is defined only for EE connections - - self_connection (str): "True" or "False": whether a unit may connect to itself (i == j) - - lambd_w (int): Average number of incoming and outgoing connections per neuron - - Returns: - weight_matrix (array): Array of connection strengths - """ - - if (network_type == "Sparse") and (self_connection == "False"): - - # Generate weight matrix for E-E/ E-I connections with mean lambda incoming and outgoing connections per neuron - assert (lambd_w <= Sorn.ne), "Number of connections per unit (lambda) should be less than the number of units (Ne) in the pool, and Ne should be greater than 25" - weight_matrix = Initializer.generate_lambd_connections( - synaptic_connection, Sorn.ne, Sorn.ni, lambd_w, lambd_std=1 - ) - - # Dense matrix for W_ie - elif (network_type == "Dense") and (self_connection == "False"): - - # Uniform distribution of weights - weight_matrix = np.random.uniform(0.0, 0.1, (Sorn.ne, Sorn.ni)) - weight_matrix.reshape((Sorn.ne, Sorn.ni)) - - return weight_matrix - - @staticmethod - def initialize_threshold_matrix(te_min: float, te_max: float, ti_min: float, ti_max: float): - """Initialize the thresholds for excitatory and inhibitory neurons - - Args: - te_min (float): Min threshold value for excitatory units - te_max (float): Max threshold value for excitatory units - ti_min (float): Min threshold value for inhibitory units - ti_max (float): Max threshold value for inhibitory units - - Returns: - te (array): Threshold values for excitatory units - ti (array): Threshold values for inhibitory units - """ - - te = np.random.uniform(te_min, te_max, (Sorn.ne, 1)) - ti = np.random.uniform(ti_min, ti_max, (Sorn.ni, 1)) - - return te, ti - - @staticmethod - def initialize_activity_vector(ne: int, ni: int): - """Initialize the activity vectors X and Y for
excitatory and inhibitory neurons - - Args: - ne (int): Number of excitatory neurons - ni (int): Number of inhibitory neurons - - Returns: - x (array): Array of activity vectors of excitatory population - y (array): Array of activity vectors of inhibitory population""" - - x = np.zeros((ne, 2)) - y = np.zeros((ni, 2)) - - return x, y - -class Plasticity(Sorn): - """Inherits the variables and functions defined in the parent class Sorn and encapsulates all the plasticity mechanisms described in the article. - """ - - def __init__(self): - - super().__init__() - self.nu = Sorn.nu # Number of input units - self.ne = Sorn.ne # Number of excitatory units - self.eta_stdp = ( - Sorn.eta_stdp - ) # STDP learning rate constant; SORN1 and SORN2 - self.eta_ip = ( - Sorn.eta_ip - ) # Intrinsic plasticity learning rate constant; SORN1 and SORN2 - self.eta_inhib = ( - Sorn.eta_inhib - ) # Inhibitory STDP (iSTDP) learning rate constant; SORN2 only - self.h_ip = 2 * Sorn.nu / Sorn.ne # Target firing rate - self.mu_ip = Sorn.mu_ip # Mean target firing rate - # Number of inhibitory units in the network - self.ni = int(0.2 * Sorn.ne) - self.time_steps = Sorn.time_steps # Total time steps of simulation - self.te_min = Sorn.te_min # Excitatory minimum Threshold - self.te_max = Sorn.te_max # Excitatory maximum Threshold - - def stdp(self, wee: np.array, x: np.array, cutoff_weights: list): - """Apply the STDP rule: regulates synaptic strength between the pre (Xj) and post (Xi) synaptic neurons - - Args: - wee (array): Weight matrix - - x (array): Excitatory network activity - - cutoff_weights (list): Maximum and minimum weight ranges - - Returns: - wee (array): Weight matrix - """ - - x = np.asarray(x) - xt_1 = x[:, 0] - xt = x[:, 1] - wee_t = wee.copy() - - # STDP applies only to neurons which are connected. - - for i in range(len(wee_t[0])): # Each neuron i, Post-synaptic neuron - - for j in range( - len(wee_t[0:]) - ): # Incoming connection from jth pre-synaptic neuron to ith neuron - - if wee_t[j][i] != 0.0: # Check connectivity - - # Get the change in weight - delta_wee_t = self.eta_stdp * \ - (xt[i] * xt_1[j] - xt_1[i] * xt[j]) - - # Update the weight from the jth neuron to the ith neuron (differs from the notation in the article) - - wee_t[j][i] = wee[j][i] + delta_wee_t - - # Prune the smallest weights induced by plasticity mechanisms; Apply lower cutoff weight - wee_t = Initializer.prune_small_weights(wee_t, cutoff_weights[0]) - - # Check and set all weights < upper cutoff weight - wee_t = Initializer.set_max_cutoff_weight(wee_t, cutoff_weights[1]) - - return wee_t - - def ip(self, te: np.array, x: np.array): - """Intrinsic Plasticity mechanism - - Args: - te (array): Threshold vector of excitatory units - - x (array): Excitatory network activity - - Returns: - te (array): Threshold vector of excitatory units - """ - - # IP rule: An active unit increases its threshold and an inactive unit decreases its threshold.
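# Worked example of the update below (illustrative numbers, not from the
# article): with the defaults eta_ip = 0.01 and h_ip = 2 * nu / ne = 0.1,
# a unit that just fired (x = 1) raises its threshold by
# 0.01 * (1 - 0.1) = 0.009, while a silent unit (x = 0) lowers it by
# 0.01 * 0.1 = 0.001, nudging every unit's firing rate toward h_ip.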
- xt = x[:, 1] - - te_update = te + self.eta_ip * (xt.reshape(self.ne, 1) - self.h_ip) - - # Check whether all te are in range [0.0,1.0] and update acordingly - - # Update te < 0.0 ---> 0.0 - # te_update = prune_small_weights(te_update,self.te_min) - - # Set all te > 1.0 --> 1.0 - # te_update = set_max_cutoff_weight(te_update,self.te_max) - - return te_update - - @staticmethod - def ss(wee: np.array): - """Synaptic Scaling or Synaptic Normalization - - Args: - wee (array): Weight matrix - - Returns: - wee (array): Scaled Weight matrix - """ - wee = wee / np.sum(wee, axis=0) - return wee - - def istdp(self, wei: np.array, x: np.array, y: np.array, cutoff_weights: list): - """Apply iSTDP rule, which regulates synaptic strength between the pre inhibitory(Xj) and post Excitatory(Xi) synaptic neurons - - Args: - wei (array): Synaptic strengths from inhibitory to excitatory - - x (array): Excitatory network activity - - y (array): Inhibitory network activity - - cutoff_weights (list): Maximum and minimum weight ranges - - Returns: - wei (array): Synaptic strengths from inhibitory to excitatory""" - - # Excitatory network activity - xt = np.asarray(x)[:, 1] - - # Inhibitory network activity - yt_1 = np.asarray(y)[:, 0] - - # iSTDP applies only on the neurons which are connected. - wei_t = wei.copy() - - for i in range( - len(wei_t[0]) - ): # Each neuron i, Post-synaptic neuron: means for each column; - - for j in range( - len(wei_t[0:]) - ): # Incoming connection from j, pre-synaptic neuron to ith neuron - - if wei_t[j][i] != 0.0: # Check connectivity - - # Get the change in weight - delta_wei_t = ( - -self.eta_inhib * yt_1[j] * - (1 - xt[i] * (1 + 1 / self.mu_ip)) - ) - - # Update the weight between jth neuron to i ""Different from notation in article - - wei_t[j][i] = wei[j][i] + delta_wei_t - - # Prune the smallest weights induced by plasticity mechanisms; Apply lower cutoff weight - wei_t = Initializer.prune_small_weights(wei_t, cutoff_weights[0]) - - # Check and set all weights < upper cutoff weight - wei_t = Initializer.set_max_cutoff_weight(wei_t, cutoff_weights[1]) - - return wei_t - - @staticmethod - def structural_plasticity(wee: np.array): - """Add new connection value to the smallest weight between excitatory units randomly - - Args: - wee (array): Weight matrix - - Returns: - wee (array): Weight matrix""" - - p_c = np.random.randint(0, 10, 1) - - if p_c == 0: # p_c= 0.1 - - # Do structural plasticity - # Choose the smallest weights randomly from the weight matrix wee - indexes = Initializer.get_unconnected_indexes(wee) - - # Choose any idx randomly such that i!=j - while True: - idx_rand = random.choice(indexes) - if idx_rand[0] != idx_rand[1]: - break - - wee[idx_rand[0]][idx_rand[1]] = 0.001 - - return wee - - @staticmethod - def initialize_plasticity(): - """Initialize weight matrices for plasticity phase based on network configuration - - Args: - kwargs (self.__dict__): All arguments are inherited from Sorn attributes - - Returns: - tuple(array): Weight matrices WEI, WEE, WIE and threshold matrices Te, Ti and Initial state vectors X,Y """ - - sorn_init = Sorn() - WEE_init = sorn_init.initialize_weight_matrix( - network_type=Sorn.network_type_ee, - synaptic_connection="EE", - self_connection="False", - lambd_w=Sorn.lambda_ee, - ) - WEI_init = sorn_init.initialize_weight_matrix( - network_type=Sorn.network_type_ei, - synaptic_connection="EI", - self_connection="False", - lambd_w=Sorn.lambda_ei, - ) - WIE_init = sorn_init.initialize_weight_matrix( - 
network_type=Sorn.network_type_ie, - synaptic_connection="IE", - self_connection="False", - lambd_w=Sorn.lambda_ie, - ) - - Wee_init = Initializer.zero_sum_incoming_check(WEE_init) - # Wei_init = initializer.zero_sum_incoming_check(WEI_init.T) # For SORN 1 - Wei_init = Initializer.zero_sum_incoming_check(WEI_init) - Wie_init = Initializer.zero_sum_incoming_check(WIE_init) - - c = np.count_nonzero(Wee_init) - v = np.count_nonzero(Wei_init) - b = np.count_nonzero(Wie_init) - - print("Network Initialized") - print("Number of connections in Wee %s , Wei %s, Wie %s" % (c, v, b)) - print( - "Shapes Wee %s Wei %s Wie %s" - % (Wee_init.shape, Wei_init.shape, Wie_init.shape) - ) - - # Normalize the incoming weights - - normalized_wee = Initializer.normalize_weight_matrix(Wee_init) - normalized_wei = Initializer.normalize_weight_matrix(Wei_init) - normalized_wie = Initializer.normalize_weight_matrix(Wie_init) - - te_init, ti_init = sorn_init.initialize_threshold_matrix( - Sorn.te_min, Sorn.te_max, Sorn.ti_min, Sorn.ti_max - ) - x_init, y_init = sorn_init.initialize_activity_vector( - Sorn.ne, Sorn.ni) - - # Initializing variables from sorn_initialize.py - - wee = normalized_wee.copy() - wei = normalized_wei.copy() - wie = normalized_wie.copy() - te = te_init.copy() - ti = ti_init.copy() - x = x_init.copy() - y = y_init.copy() - - return wee, wei, wie, te, ti, x, y - - -class MatrixCollection(Sorn): - """Collect all matrices initialized and updated during simulation (plasiticity and training phases) - - Args: - phase (str): Training or Plasticity phase - - matrices (dict): Network activity, threshold and connection matrices - - Returns: - MatrixCollection instance""" - - def __init__(self, phase: str, matrices: dict = None): - super().__init__() - - self.phase = phase - self.matrices = matrices - if self.phase == "plasticity" and self.matrices == None: - - self.time_steps = Sorn.time_steps + 1 # Total training steps - self.Wee, self.Wei, self.Wie, self.Te, self.Ti, self.X, self.Y = ( - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - ) - wee, wei, wie, te, ti, x, y = Plasticity.initialize_plasticity() - - # Assign initial matrix to the master matrices - self.Wee[0] = wee - self.Wei[0] = wei - self.Wie[0] = wie - self.Te[0] = te - self.Ti[0] = ti - self.X[0] = x - self.Y[0] = y - - elif self.phase == "plasticity" and self.matrices != None: - - self.time_steps = Sorn.time_steps + 1 # Total training steps - self.Wee, self.Wei, self.Wie, self.Te, self.Ti, self.X, self.Y = ( - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - ) - # Assign matrices from plasticity phase to the new master matrices for training phase - self.Wee[0] = matrices["Wee"] - self.Wei[0] = matrices["Wei"] - self.Wie[0] = matrices["Wie"] - self.Te[0] = matrices["Te"] - self.Ti[0] = matrices["Ti"] - self.X[0] = matrices["X"] - self.Y[0] = matrices["Y"] - - elif self.phase == "training": - - # NOTE:time_steps here is diferent for plasticity and training phase - self.time_steps = Sorn.time_steps + 1 # Total training steps - self.Wee, self.Wei, self.Wie, self.Te, self.Ti, self.X, self.Y = ( - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - [0] * self.time_steps, - ) - # Assign matrices from 
plasticity phase to new respective matrices for training phase - self.Wee[0] = matrices["Wee"] - self.Wei[0] = matrices["Wei"] - self.Wie[0] = matrices["Wie"] - self.Te[0] = matrices["Te"] - self.Ti[0] = matrices["Ti"] - self.X[0] = matrices["X"] - self.Y[0] = matrices["Y"] - - def weight_matrix(self, wee: np.array, wei: np.array, wie: np.array, i: int): - """Update weight matrices - - Args: - wee (array): Excitatory-Excitatory weight matrix - - wei (array): Inhibitory-Excitatory weight matrix - - wie (array): Excitatory-Inhibitory weight matrix - - i (int): Time step - Returns: - tuple (array): Weight Matrices Wee, Wei, Wie""" - - self.Wee[i + 1] = wee - self.Wei[i + 1] = wei - self.Wie[i + 1] = wie - - return self.Wee, self.Wei, self.Wie - - def threshold_matrix(self, te: np.array, ti: np.array, i: int): - """Update threshold matrices - - Args: - te (array): Excitatory threshold - - ti (array): Inhibitory threshold - - i (int): Time step - - Returns: - tuple (array): Threshold Matrices Te and Ti""" - - self.Te[i + 1] = te - self.Ti[i + 1] = ti - return self.Te, self.Ti - - def network_activity_t( - self, excitatory_net: np.array, inhibitory_net: np.array, i: int - ): - """Network state at current time step - - Args: - excitatory_net (array): Excitatory network activity - - inhibitory_net (array): Inhibitory network activity - - i (int): Time step - - Returns: - tuple (array): Updated Excitatory and Inhibitory states - """ - - self.X[i + 1] = excitatory_net - self.Y[i + 1] = inhibitory_net - - return self.X, self.Y - - def network_activity_t_1(self, x: np.array, y: np.array, i: int): - """Network activity at previous time step - - Args: - x (array): Excitatory network activity - - y (array): Inhibitory network activity - - i (int): Time step - - Returns: - tuple (array): Previous Excitatory and Inhibitory states - """ - x_1, y_1 = [0] * self.time_steps, [0] * self.time_steps - x_1[i] = x - y_1[i] = y - - return x_1, y_1 - - -class NetworkState(Plasticity): - - """The evolution of network states - - Args: - v_t (array): External input/stimuli - - Returns: - instance (object): NetworkState instance""" - - def __init__(self, v_t: np.array): - super().__init__() - self.v_t = v_t - assert Sorn.nu == len( - self.v_t - ), "Input units and input size mismatch: {} != {}".format( - Sorn.nu, len(self.v_t) - ) - if Sorn.nu != Sorn.ne: - self.v_t = list(self.v_t) + [0.0] * (Sorn.ne - Sorn.nu) - self.v_t = np.expand_dims(self.v_t, 1) - - def incoming_drive(self, weights: np.array, activity_vector: np.array): - """Excitatory Post synaptic potential towards neurons in the reservoir in the absence of external input - - Args: - weights (array): Synaptic strengths - - activity_vector (list): Acitivity of inhibitory or Excitatory neurons - - Returns: - incoming (array): Excitatory Post synaptic potential towards neurons - """ - incoming = weights * activity_vector - incoming = np.array(incoming.sum(axis=0)) - return incoming - - def excitatory_network_state( - self, - wee: np.array, - wei: np.array, - te: np.array, - x: np.array, - y: np.array, - white_noise_e: np.array, - ): - """Activity of Excitatory neurons in the network - - Args: - wee (array): Excitatory-Excitatory weight matrix - - wei (array): Inhibitory-Excitatory weight matrix - - te (array): Excitatory threshold - - x (array): Excitatory network activity - - y (array): Inhibitory network activity - - white_noise_e (array): Gaussian noise - - Returns: - x (array): Current Excitatory network activity - """ - xt = x[:, 1] - xt = xt.reshape(self.ne, 
1) - yt = y[:, 1] - yt = yt.reshape(self.ni, 1) - - incoming_drive_e = np.expand_dims( - self.incoming_drive(weights=wee, activity_vector=xt), 1 - ) - incoming_drive_i = np.expand_dims( - self.incoming_drive(weights=wei, activity_vector=yt), 1 - ) - tot_incoming_drive = ( - incoming_drive_e - - incoming_drive_i - + white_noise_e - + np.asarray(self.v_t) - - te - ) - - # Heaviside step function - heaviside_step = np.expand_dims([0.0] * len(tot_incoming_drive), 1) - heaviside_step[tot_incoming_drive > 0] = 1.0 - return heaviside_step - - def inhibitory_network_state( - self, wie: np.array, ti: np.array, y: np.array, white_noise_i: np.array - ): - """Activity of Inhibitory neurons in the network - - Args: - wie (array): Excitatory-Inhibitory weight matrix - - ti (array): Inhibitory threshold - - y (array): Excitatory network activity driving the inhibitory units (the call site passes X; note the reshape to Sorn.ne below) - - white_noise_i (array): Gaussian noise - - Returns: - y (array): Current Inhibitory network activity""" - - wie = np.asarray(wie) - yt = y[:, 1] - yt = yt.reshape(Sorn.ne, 1) - - incoming_drive_e = np.expand_dims( - self.incoming_drive(weights=wie, activity_vector=yt), 1 - ) - - tot_incoming_drive = incoming_drive_e + white_noise_i - ti - heaviside_step = np.expand_dims([0.0] * len(tot_incoming_drive), 1) - heaviside_step[tot_incoming_drive > 0] = 1.0 - - return heaviside_step - - def recurrent_drive( - self, - wee: np.array, - wei: np.array, - te: np.array, - x: np.array, - y: np.array, - white_noise_e: np.array, - ): - """Network state due to recurrent drive received by each unit at time t+1. Activity of Excitatory neurons without external stimuli - - Args: - - wee (array): Excitatory-Excitatory weight matrix - - wei (array): Inhibitory-Excitatory weight matrix - - te (array): Excitatory threshold - - x (array): Excitatory network activity - - y (array): Inhibitory network activity - - white_noise_e (array): Gaussian noise - - Returns: - xt (array): Recurrent network state - """ - xt = x[:, 1] - xt = xt.reshape(self.ne, 1) - yt = y[:, 1] - yt = yt.reshape(self.ni, 1) - - incoming_drive_e = np.expand_dims( - self.incoming_drive(weights=wee, activity_vector=xt), 1 - ) - incoming_drive_i = np.expand_dims( - self.incoming_drive(weights=wei, activity_vector=yt), 1 - ) - - tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e - te - - heaviside_step = np.expand_dims([0.0] * len(tot_incoming_drive), 1) - heaviside_step[tot_incoming_drive > 0] = 1.0 - - return heaviside_step - - -# Simulate / Train SORN -class Simulator_(Sorn): - - """Simulate the SORN with external input/noise, using fresh or pretrained matrices - - Args: - inputs (np.array, optional): External stimuli. Defaults to None. - - phase (str, optional): Plasticity phase. Defaults to "plasticity". - - matrices (dict, optional): Network states, connections and threshold matrices. Defaults to None. - - time_steps (int, optional): Total number of time steps to simulate the network. Defaults to 1. - - noise (bool, optional): If True, noise will be added. Defaults to True.
- - Returns: - plastic_matrices (dict): Network states, connections and threshold matrices - - X_all (array): Excitatory network activity collected during entire simulation steps - - Y_all (array): Inhibitory network activity collected during entire simulation steps - - R_all (array): Recurrent network activity collected during entire simulation steps - - frac_pos_active_conn (list): Number of positive connection strengths in the network at each time step during simulation""" - - def __init__(self): - super().__init__() - pass - - def simulate_sorn( - self, - inputs: np.array = None, - phase: str = "plasticity", - matrices: dict = None, - time_steps: int = None, - noise: bool = True, - freeze: list = None, - **kwargs - ): - """Simulation/Plasticity phase - - Args: - inputs (np.array, optional): External stimuli. Defaults to None. - - phase (str, optional): Plasticity phase. Defaults to "plasticity" - - matrices (dict, optional): Network states, connections and threshold matrices. Defaults to None. - - time_steps (int, optional): Total number of time steps to simulate the network. Defaults to 1. - - noise (bool, optional): If True, noise will be added. Defaults to True. - - freeze (list, optional): List of synaptic plasticity mechanisms which will be turned off during simulation. Defaults to None. - - Returns: - plastic_matrices (dict): Network states, connections and threshold matrices - - X_all (array): Excitatory network activity collected during entire simulation steps - - Y_all (array): Inhibitory network activity collected during entire simulation steps - - R_all (array): Recurrent network activity collected during entire simulation steps - - frac_pos_active_conn (list): Number of positive connection strengths in the network at each time step during simulation""" - - assert phase in [ - "plasticity", "training" - ], "Phase can be either 'plasticity' or 'training'" - - self.time_steps = time_steps - Sorn.time_steps = time_steps - self.phase = phase - self.matrices = matrices - self.freeze = [] if freeze is None else freeze - - kwargs_ = [ - "ne", - "nu", - "network_type_ee", - "network_type_ei", - "network_type_ie", - "lambda_ee", - "lambda_ei", - "lambda_ie", - "eta_stdp", - "eta_inhib", - "eta_ip", - "te_max", - "ti_max", - "ti_min", - "te_min", - "mu_ip", - "sigma_ip", - ] - for key, value in kwargs.items(): - if key in kwargs_: - setattr(Sorn, key, value) - # assert Sorn.nu == len(inputs[:,0]),"Size mismatch: Input != Nu " - Sorn.ni = int(0.2 * Sorn.ne) - - # Initialize/Get the weight, threshold matrices and activity vectors - matrix_collection = MatrixCollection( - phase=self.phase, matrices=self.matrices) - - # Collect the network activity at all time steps - - X_all = [0] * self.time_steps - Y_all = [0] * self.time_steps - R_all = [0] * self.time_steps - - frac_pos_active_conn = [] - - # To get the last activation status of Exc and Inh neurons - for i in range(self.time_steps): - - if noise: - white_noise_e = Initializer.white_gaussian_noise( - mu=0.0, sigma=0.04, t=Sorn.ne - ) - white_noise_i = Initializer.white_gaussian_noise( - mu=0.0, sigma=0.04, t=Sorn.ni - ) - else: - white_noise_e, white_noise_i = 0.0, 0.0 - - network_state = NetworkState( - inputs[:, i] - ) - - # Buffers to get the resulting x and y vectors at the current time step and update the master matrix - x_buffer, y_buffer = np.zeros( - (Sorn.ne, 2)), np.zeros((Sorn.ni, 2)) - - te_buffer, ti_buffer = np.zeros( - (Sorn.ne, 1)), np.zeros((Sorn.ni, 1)) - - Wee, Wei, Wie = ( - matrix_collection.Wee,
matrix_collection.Wei, - matrix_collection.Wie, - ) - Te, Ti = matrix_collection.Te, matrix_collection.Ti - X, Y = matrix_collection.X, matrix_collection.Y - - # Fraction of active connections between E-E network - frac_pos_active_conn.append((Wee[i] > 0.0).sum()) - - # Recurrent drive - r = network_state.recurrent_drive( - Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e - ) - - # Get excitatory states and inhibitory states given the weights and thresholds - # x(t+1), y(t+1) - excitatory_state_xt_buffer = network_state.excitatory_network_state( - Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e - ) - inhibitory_state_yt_buffer = network_state.inhibitory_network_state( - Wie[i], Ti[i], X[i], white_noise_i - ) - - # Update X and Y - x_buffer[:, 0] = X[i][:, 1] # xt -->(becomes) xt_1 - x_buffer[ - :, 1 - ] = excitatory_state_xt_buffer.T # New_activation; x_buffer --> xt - - y_buffer[:, 0] = Y[i][:, 1] - y_buffer[:, 1] = inhibitory_state_yt_buffer.T - - # Plasticity phase - plasticity = Plasticity() - - # STDP - if 'stdp' not in self.freeze: - Wee[i] = plasticity.stdp( - Wee[i], x_buffer, cutoff_weights=(0.0, 1.0)) - - # Intrinsic plasticity - if 'ip' not in self.freeze: - Te[i] = plasticity.ip(Te[i], x_buffer) - - # Structural plasticity - if 'sp' not in self.freeze: - Wee[i] = plasticity.structural_plasticity(Wee[i]) - - # iSTDP - if 'istdp' not in self.freeze: - Wei[i] = plasticity.istdp( - Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0) - ) - - # Synaptic scaling Wee - if 'ss' not in self.freeze: - Wee[i] = plasticity.ss(Wee[i]) - Wei[i] = plasticity.ss(Wei[i]) - - # Assign the matrices to the matrix collections - matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i) - matrix_collection.threshold_matrix(Te[i], Ti[i], i) - matrix_collection.network_activity_t(x_buffer, y_buffer, i) - - X_all[i] = x_buffer[:, 1] - Y_all[i] = y_buffer[:, 1] - R_all[i] = r - - plastic_matrices = { - "Wee": matrix_collection.Wee[-1], - "Wei": matrix_collection.Wei[-1], - "Wie": matrix_collection.Wie[-1], - "Te": matrix_collection.Te[-1], - "Ti": matrix_collection.Ti[-1], - "X": X[-1], - "Y": Y[-1], - } - - return plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn - - -class Trainer_(Sorn): - """Train the network with the fresh or pretrained network matrices and external stimuli - - Args: - inputs (np.array, optional): External stimuli. Defaults to None. - - phase (str, optional): Training phase. Defaults to "training". - - matrices (dict, optional): Network states, connections and threshold matrices. Defaults to None. - - time_steps (int, optional): Total number of time steps to simulate the network. Defaults to 1. - - noise (bool, optional): If True, noise will be added. Defaults to True. - - freeze (list, optional): List of synaptic plasticity mechanisms which will be turned off during simulation. Defaults to None. 
- - Returns: - plastic_matrices (dict): Network states, connections and threshold matrices - - X_all (array): Excitatory network activity collected during entire simulation steps - - Y_all (array): Inhibitory network activity collected during entire simulation steps - - R_all (array): Recurrent network activity collected during entire simulation steps - - frac_pos_active_conn (list): Number of positive connection strengths in the network at each time step during simulation""" - - def __init__(self): - super().__init__() - pass - - def train_sorn( - self, - inputs: np.array = None, - phase: str = "training", - matrices: dict = None, - time_steps: int = None, - noise: bool = True, - freeze: list = None, - **kwargs - ): - """Train the network with the fresh or pretrained network matrices and external stimuli - - Args: - inputs (np.array, optional): External stimuli. Defaults to None. - - phase (str, optional): Training phase. Defaults to "training". - - matrices (dict, optional): Network states, connections and threshold matrices. Defaults to None. - - time_steps (int, optional): Total number of time steps to simulate the network. Defaults to 1. - - noise (bool, optional): If True, noise will be added. Defaults to True. - - Returns: - - plastic_matrices (dict): Network states, connections and threshold matrices - - X_all (array): Excitatory network activity collected during entire simulation steps - - Y_all (array): Inhibitory network activity collected during entire simulation steps - - R_all (array): Recurrent network activity collected during entire simulation steps - - frac_pos_active_conn (list): Number of positive connection strengths in the network at each time step during simulation""" - - assert phase in [ - "plasticity", "training" - ], "Phase can be either 'plasticity' or 'training'" - - kwargs_ = [ - "ne", - "nu", - "network_type_ee", - "network_type_ei", - "network_type_ie", - "lambda_ee", - "lambda_ei", - "lambda_ie", - "eta_stdp", - "eta_inhib", - "eta_ip", - "te_max", - "ti_max", - "ti_min", - "te_min", - "mu_ip", - "sigma_ip", - ] - for key, value in kwargs.items(): - if key in kwargs_: - setattr(Sorn, key, value) - Sorn.ni = int(0.2 * Sorn.ne) - # assert Sorn.nu == len(inputs[:,0]),"Size mismatch: Input != Nu " - - self.phase = phase - self.matrices = matrices - self.time_steps = time_steps - Sorn.time_steps = time_steps - self.inputs = np.asarray(inputs) - self.freeze = [] if freeze is None else freeze - - X_all = [0] * self.time_steps - Y_all = [0] * self.time_steps - R_all = [0] * self.time_steps - - frac_pos_active_conn = [] - - matrix_collection = MatrixCollection( - phase=self.phase, matrices=self.matrices) - - for i in range(self.time_steps): - - if noise: - white_noise_e = Initializer.white_gaussian_noise( - mu=0.0, sigma=0.04, t=Sorn.ne - ) - white_noise_i = Initializer.white_gaussian_noise( - mu=0.0, sigma=0.04, t=Sorn.ni - ) - else: - white_noise_e = 0.0 - white_noise_i = 0.0 - - network_state = NetworkState( - self.inputs[:, i] - ) - - # Buffers to get the resulting x and y vectors at the current time step and update the master matrix - x_buffer, y_buffer = np.zeros( - (Sorn.ne, 2)), np.zeros((Sorn.ni, 2)) - te_buffer, ti_buffer = np.zeros( - (Sorn.ne, 1)), np.zeros((Sorn.ni, 1)) - - Wee, Wei, Wie = ( - matrix_collection.Wee, - matrix_collection.Wei, - matrix_collection.Wie, - ) - Te, Ti = matrix_collection.Te, matrix_collection.Ti - X, Y = matrix_collection.X, matrix_collection.Y - - # Fraction of active connections between E-E network - 
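# What the counter below records (figures are illustrative): Wee[i] > 0.0
# yields a boolean matrix, so .sum() counts the active E-E synapses at
# step i. With the defaults ne = 200 and lambda_ee = 20 this starts around
# 200 * 20 = 4000 and then drifts as STDP, pruning and structural
# plasticity add or remove connections.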
frac_pos_active_conn.append((Wee[i] > 0.0).sum()) - - # Recurrent drive at t+1 used to predict the next external stimuli - r = network_state.recurrent_drive( - Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e=white_noise_e - ) - - # Get excitatory states and inhibitory states given the weights and thresholds - # x(t+1), y(t+1) - excitatory_state_xt_buffer = network_state.excitatory_network_state( - Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e=white_noise_e - ) - inhibitory_state_yt_buffer = network_state.inhibitory_network_state( - Wie[i], Ti[i], X[i], white_noise_i=white_noise_i - ) - - # Update X and Y - x_buffer[:, 0] = X[i][:, 1] # xt -->xt_1 - x_buffer[:, 1] = excitatory_state_xt_buffer.T # x_buffer --> xt - y_buffer[:, 0] = Y[i][:, 1] - y_buffer[:, 1] = inhibitory_state_yt_buffer.T - - if self.phase == "plasticity": - plasticity = Plasticity() - - # STDP - if 'stdp' not in self.freeze: - Wee[i] = plasticity.stdp( - Wee[i], x_buffer, cutoff_weights=(0.0, 1.0)) - - # Intrinsic plasticity - if 'ip' not in self.freeze: - Te[i] = plasticity.ip(Te[i], x_buffer) - - # Structural plasticity - if 'sp' not in self.freeze: - Wee[i] = plasticity.structural_plasticity(Wee[i]) - - # iSTDP - if 'istdp' not in self.freeze: - Wei[i] = plasticity.istdp( - Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0) - ) - - # Synaptic scaling Wee - if 'ss' not in self.freeze: - Wee[i] = plasticity.ss(Wee[i]) - - # Synaptic scaling Wei - Wei[i] = plasticity.ss(Wei[i]) - else: - # Wee[i], Wei[i], Te[i] remain same - pass - - # Assign the matrices to the matrix collections - matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i) - matrix_collection.threshold_matrix(Te[i], Ti[i], i) - matrix_collection.network_activity_t(x_buffer, y_buffer, i) - - X_all[i] = x_buffer[:, 1] - Y_all[i] = y_buffer[:, 1] - R_all[i] = r - - plastic_matrices = { - "Wee": matrix_collection.Wee[-1], - "Wei": matrix_collection.Wei[-1], - "Wie": matrix_collection.Wie[-1], - "Te": matrix_collection.Te[-1], - "Ti": matrix_collection.Ti[-1], - "X": X[-1], - "Y": Y[-1], - } - - return plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn - - -Trainer = Trainer_() -Simulator = Simulator_() -if __name__ == "__main__": - pass \ No newline at end of file diff --git a/build/lib/sorn/test_sorn.py b/build/lib/sorn/test_sorn.py deleted file mode 100644 index 01a0c38..0000000 --- a/build/lib/sorn/test_sorn.py +++ /dev/null @@ -1,227 +0,0 @@ -import unittest -import pickle -import numpy as np -from sorn.sorn import RunSorn, Generator -from sorn.utils import Plotter, Statistics, Initializer -from sorn import Simulator, Trainer - -num_features = 10 -inputs = np.random.rand(num_features, 1) - -# Get the pickled matrices: -with open("sample_matrices.pkl", "rb") as f: - ( - matrices_dict, - Exc_activity, - Inh_activity, - Rec_activity, - num_active_connections, - ) = pickle.load(f) - - -class TestSorn(unittest.TestCase): - def test_runsorn(self): - - self.assertRaises( - Exception, Generator().get_initial_matrices("./sorn/")) - - matrices_dict = Generator().get_initial_matrices("./sorn") - - self.assertRaises( - Exception, RunSorn(phase="Plasticity", - matrices=None).run_sorn([0.0]) - ) - - self.assertRaises( - Exception, RunSorn( - phase="Training", matrices=matrices_dict).run_sorn([0.0]) - ) - - self.assertRaises( - Exception, - Simulator.simulate_sorn( - inputs=[0.0], - phase="plasticity", - matrices=None, - noise=True, - time_steps=2, - ne=20, - nu=1, - ), - ) - - self.assertRaises( - Exception, - Simulator.simulate_sorn( - inputs=[0.0], - 
phase="plasticity", - matrices=matrices_dict, - noise=True, - time_steps=2, - ne=20, - nu=10, - ), - ) - - self.assertRaises( - Exception, - Trainer.train_sorn( - inputs=inputs, - phase="Training", - matrices=matrices_dict, - nu=num_features, - time_steps=1, - ), - ) - - def test_plotter(self): - - self.assertRaises( - Exception, - Plotter.hist_outgoing_conn( - weights=matrices_dict["Wee"], bin_size=5, histtype="bar", savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.hist_outgoing_conn( - weights=matrices_dict["Wee"], bin_size=5, histtype="bar", savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.hist_incoming_conn( - weights=matrices_dict["Wee"], bin_size=5, histtype="bar", savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.network_connection_dynamics( - connection_counts=num_active_connections, - initial_steps=10, - final_steps=10, - savefig=False, - ), - ) - - self.assertRaises( - Exception, - Plotter.hist_firing_rate_network( - spike_train=np.asarray(Exc_activity), bin_size=5, savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.scatter_plot(spike_train=np.asarray( - Exc_activity), savefig=False), - ) - - self.assertRaises( - Exception, - Plotter.raster_plot(spike_train=np.asarray( - Exc_activity), savefig=False), - ) - - self.assertRaises( - Exception, - Plotter.isi_exponential_fit( - spike_train=np.asarray(Exc_activity), - neuron=10, - bin_size=5, - savefig=False, - ), - ) - - self.assertRaises( - Exception, - Plotter.weight_distribution( - weights=matrices_dict["Wee"], bin_size=5, savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.linear_lognormal_fit( - weights=matrices_dict["Wee"], num_points=10, savefig=False - ), - ) - - self.assertRaises( - Exception, - Plotter.hamming_distance( - hamming_dist=[0, 0, 0, 1, 1, 1, 1, 1, 1], savefig=False - ), - ) - - def test_statistics(self): - - self.assertRaises( - Exception, - Statistics.firing_rate_neuron( - spike_train=np.asarray(Exc_activity), neuron=10, bin_size=5 - ), - ) - - self.assertRaises( - Exception, - Statistics.firing_rate_network( - spike_train=np.asarray(Exc_activity)), - ) - - self.assertRaises( - Exception, - Statistics.scale_dependent_smoothness_measure( - firing_rates=[1, 1, 5, 6, 3, 7] - ), - ) - - self.assertRaises( - Exception, Statistics.autocorr( - firing_rates=[1, 1, 5, 6, 3, 7], t=2) - ) - - self.assertRaises( - Exception, Statistics.avg_corr_coeff( - spike_train=np.asarray(Exc_activity)) - ) - - self.assertRaises( - Exception, Statistics.spike_times( - spike_train=np.asarray(Exc_activity)) - ) - - self.assertRaises( - Exception, - Statistics.hamming_distance( - actual_spike_train=np.asarray(Exc_activity), - perturbed_spike_train=np.asarray(Exc_activity), - ), - ) - - self.assertRaises( - Exception, - Statistics.spike_time_intervals( - spike_train=np.asarray(Exc_activity)), - ) - - self.assertRaises( - Exception, - Statistics.fanofactor( - spike_train=np.asarray(Exc_activity), neuron=10, window_size=10 - ), - ) - - self.assertRaises( - Exception, - Statistics.spike_source_entropy( - spike_train=np.asarray(Exc_activity), neurons_in_reservoir=200 - ), - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/build/lib/sorn/utils.py b/build/lib/sorn/utils.py deleted file mode 100644 index f72b638..0000000 --- a/build/lib/sorn/utils.py +++ /dev/null @@ -1,1163 +0,0 @@ -from __future__ import division -import numpy as np -from scipy.stats import norm -import random -import matplotlib.pyplot as plt -import seaborn as sns -from 
scipy.optimize import curve_fit -from scipy import stats -import networkx as nx -import pandas as pd -from mpl_toolkits.axes_grid1.inset_locator import InsetPosition - - -class Initializer(object): - """ - Helper class to initialize the matrices for the SORN - """ - - def __init__(self): - pass - - @staticmethod - def generate_strong_inp(length: int, reservoir_size: int): - """Generate strong one-hot vector of input. Random neurons in the reservoir acts as inputs - - Args: - length (int): Number of input neurons - - Returns: - inp (array): Input vector of length equals the number of neurons in the reservoir - with randomly chosen neuron set active - - idx (list): List of chosen input neurons """ - - inp = [0] * reservoir_size - x = [0] * length - idx = np.random.choice(length, np.random.randint(reservoir_size)) - - for i in idx: - x[i] = 1.0e4 - - inp[: len(x)] = x - - return inp, idx - - # Generate multi-node one-hot strong inputs - - @staticmethod - def multi_one_hot_inp(ne: int, inputs: list, n_nodes_per_inp: int): - """Generate multi(n_nodes_per_inp) one hot vector for each input. - For each input, set n_nodes_per_inp equals one and the rest of - neurons in the pool recieves no external stimuli - - Args: - ne (int): Number of excitatory units in sorn - - inputs (list): input labels - - n_nodes_per_inp(int): Number of target units in pool that receives single input - - Returns: - one_hot_vector for each label with length equals ne - - """ - - one_hot = np.zeros((ne, len(inputs))) - - idxs = [] - - for _ in range(n_nodes_per_inp): - idxs.append(random.sample(range(0, ne), len(inputs))) - - idxs = list(zip(*idxs)) - - j = 0 # Max(j) = len(inputs) - for idx_list in idxs: - for i in idx_list: - one_hot[i][j] = 1 - j += 1 - - return one_hot, idxs - - @staticmethod - def generate_gaussian_inputs(length: int, reservoir_size: int): - - """Generate external stimuli sampled from Gaussian distribution. - Randomly neurons in the reservoir receives this input at each timestep - - Args: - length (int): Number of input neurons - - Returns: - out (array): Input vector of length equals the number of neurons in the reservoir - with randomly chosen neuron set active - - idx (int): List of chosen input neurons - """ - - out = [0] * reservoir_size - x = [0] * length - idx = np.random.choice(length, np.random.randint(reservoir_size)) - inp = np.random.normal(length) - - for i in idx: - x[i] = inp[i] - - out[: len(x)] = x - - return out, idx - - @staticmethod - def normalize_weight_matrix(weight_matrix: np.array): - - # Applied only while initializing the weight. 
During simulation, Synaptic scaling applied on weight matrices - - """ Normalize the weights in the matrix such that incoming connections to a neuron sum up to 1 - - Args: - weight_matrix (array): Incoming Weights from W_ee or W_ei or W_ie - - Returns: - weight_matrix (array): Normalized weight matrix""" - - normalized_weight_matrix = weight_matrix / np.sum(weight_matrix, axis=0) - - return normalized_weight_matrix - - @staticmethod - def generate_lambd_connections( - synaptic_connection: str, ne: int, ni: int, lambd_w: int, lambd_std: int - ): - - """Generate lambda incoming connections for Excitatory neurons and outgoing connections per Inhibitory neuron - - Args: - synaptic_connection (str): Type of sysnpatic connection (EE,EI or IE) - - ne (int): Number of excitatory units - - ni (int): Number of inhibitory units - - lambd_w (int): Average number of incoming connections - - lambd_std (int): Standard deviation of average number of connections per neuron - - Returns: - connection_weights (array) - Weight matrix - - """ - - if synaptic_connection == "EE": - - """Choose random lamda connections per neuron""" - - # Draw normally distributed ne integers with mean lambd_w - - lambdas_incoming = norm.ppf( - np.random.random(ne), loc=lambd_w, scale=lambd_std - ).astype(int) - - # lambdas_outgoing = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int) - - # List of neurons - - list_neurons = list(range(ne)) - - # Connection weights - - connection_weights = np.zeros((ne, ne)) - - # For each lambd value in the above list, - # generate weights for incoming and outgoing connections - - # -------------Gaussian Distribution of weights -------------- - - # weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution - # Centered around 2 to make all values positive - - # ------------Uniform Distribution -------------------------- - global_incoming_weights = np.random.uniform(0.0, 0.1, sum(lambdas_incoming)) - - # Index Counter - global_incoming_weights_idx = 0 - - # Choose the neurons in order [0 to 199] - - for neuron in list_neurons: - - # Choose ramdom unique (lambdas[neuron]) neurons from list_neurons - possible_connections = list_neurons.copy() - - possible_connections.remove( - neuron - ) # Remove the selected neuron from possible connections i!=j - - # Choose random presynaptic neurons - possible_incoming_connections = random.sample( - possible_connections, lambdas_incoming[neuron] - ) - - incoming_weights_neuron = global_incoming_weights[ - global_incoming_weights_idx : global_incoming_weights_idx - + lambdas_incoming[neuron] - ] - - # ---------- Update the connection weight matrix ------------ - - # Update incoming connection weights for selected 'neuron' - - for incoming_idx, incoming_weight in enumerate(incoming_weights_neuron): - connection_weights[possible_incoming_connections[incoming_idx]][ - neuron - ] = incoming_weight - - global_incoming_weights_idx += lambdas_incoming[neuron] - - return connection_weights - - if synaptic_connection == "EI": - - """Choose random lamda connections per neuron""" - - # Draw normally distributed ni integers with mean lambd_w - lambdas = norm.ppf( - np.random.random(ni), loc=lambd_w, scale=lambd_std - ).astype(int) - - # List of neurons - - list_neurons = list(range(ni)) # Each i can connect with random ne neurons - - # Initializing connection weights variable - - connection_weights = np.zeros((ni, ne)) - - # ------------Uniform Distribution ----------------------------- - 
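# Sketch of the bookkeeping below (sizes are illustrative, from the class
# defaults ne = 200, hence ni = 40, and lambda_ei = 40): sum(lambdas) is
# roughly 40 * 40 = 1600, so all outgoing weights are drawn as one uniform
# vector and then sliced per neuron via global_outgoing_weights_idx,
# rather than re-sampling inside the loop.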
global_outgoing_weights = np.random.uniform(0.0, 0.1, sum(lambdas)) - - # Index Counter - global_outgoing_weights_idx = 0 - - # Choose the neurons in order [0 to 40] - - for neuron in list_neurons: - - # Choose random unique (lambdas[neuron]) neurons from list_neurons - possible_connections = list(range(ne)) - - possible_outgoing_connections = random.sample( - possible_connections, lambdas[neuron] - ) # possible_outgoing connections to the neuron - - # Update weights - outgoing_weights = global_outgoing_weights[ - global_outgoing_weights_idx : global_outgoing_weights_idx - + lambdas[neuron] - ] - - # ---------- Update the connection weight matrix ------------ - - # Update outgoing connections for the neuron - - for outgoing_idx, outgoing_weight in enumerate( - outgoing_weights - ): # Update the columns in the connection matrix - connection_weights[neuron][ - possible_outgoing_connections[outgoing_idx] - ] = outgoing_weight - - # Update the global weight values index - global_outgoing_weights_idx += lambdas[neuron] - - return connection_weights - - @staticmethod - def get_incoming_connection_dict(weights: np.array): - """ Get the non-zero entries in columns is the incoming connections for the neurons - - Args: - weights (np.array): Connection/Synaptic weights - - Returns: - dict : Dictionary of incoming connections to each neuron - """ - - # Indices of nonzero entries in the columns - connection_dict = dict.fromkeys(range(1, len(weights) + 1), 0) - - for i in range(len(weights[0])): # For each neuron - connection_dict[i] = list(np.nonzero(weights[:, i])[0]) - - return connection_dict - - @staticmethod - def get_outgoing_connection_dict(weights: np.array): - """Get the non-zero entries in rows is the outgoing connections for the neurons - - Args: - weights (np.array): Connection/Synaptic weights - - Returns: - dict : Dictionary of outgoing connections from each neuron - """ - - # Indices of nonzero entries in the rows - connection_dict = dict.fromkeys(range(1, len(weights) + 1), 1) - - for i in range(len(weights[0])): # For each neuron - connection_dict[i] = list(np.nonzero(weights[i, :])[0]) - - return connection_dict - - @staticmethod - def prune_small_weights(weights: np.array, cutoff_weight: float): - """Prune the connections with negative connection strength. 
The weights less than cutoff_weight set to 0 - - Args: - weights (np.array): Synaptic strengths - - cutoff_weight (float): Lower weight threshold - - Returns: - array: Connections weights with values less than cutoff_weight set to 0 - """ - - weights[weights <= cutoff_weight] = cutoff_weight - - return weights - - @staticmethod - def set_max_cutoff_weight(weights: np.array, cutoff_weight: float): - """ Set cutoff limit for the values in given array - - Args: - weights (np.array): Synaptic strengths - - cutoff_weight (float): Higher weight threshold - - Returns: - array: Connections weights with values greater than cutoff_weight set to 1 - """ - - weights[weights > cutoff_weight] = cutoff_weight - - return weights - - @staticmethod - def get_unconnected_indexes(wee: np.array): - """ Helper function for Structural plasticity to randomly select the unconnected units - - Args: - wee (array): Weight matrix - - Returns: - list (indices): (row_idx,col_idx)""" - - i, j = np.where(wee <= 0.0) - indices = list(zip(i, j)) - - self_conn_removed = [] - for i, idxs in enumerate(indices): - - if idxs[0] != idxs[1]: - self_conn_removed.append(indices[i]) - - return self_conn_removed - - @staticmethod - def white_gaussian_noise(mu: float, sigma: float, t: int): - - """Generates white gaussian noise with mean mu, standard deviation sigma and - the noise length equals t - - Args: - mu (float): Mean value of Gaussian noise - - sigma (float): Standard deviation of Gaussian noise - - t (int): Length of noise vector - - Returns: - array: White gaussian noise of length t - """ - - noise = np.random.normal(mu, sigma, t) - - return np.expand_dims(noise, 1) - - @staticmethod - def zero_sum_incoming_check(weights: np.array): - """Make sure, each neuron in the pool has atleast 1 incoming connection - - Args: - weights (array): Synaptic strengths - - Returns: - array: Synaptic weights of neurons with atleast one positive (non-zero) incoming connection strength - """ - zero_sum_incomings = np.where(np.sum(weights, axis=0) == 0.0) - if len(zero_sum_incomings[-1]) == 0: - return weights - else: - for zero_sum_incoming in zero_sum_incomings[-1]: - - rand_indices = np.random.randint( - int(weights.shape[0] * 0.2), size=2 - ) - rand_values = np.random.uniform(0.0, 0.1, 2) - - for i, idx in enumerate(rand_indices): - weights[:, zero_sum_incoming][idx] = rand_values[i] - - return weights - - -class Plotter(object): - """Wrapper class to call plotting methods - """ - - def __init__(self): - pass - - @staticmethod - def hist_incoming_conn( - weights: np.array, bin_size: int, histtype: str, savefig: bool - ): - """Plot the histogram of number of presynaptic connections per neuron - - Args: - weights (array): Connection weights - - bin_size (int): Histogram bin size - - histtype (str): Same as histtype matplotlib - - savefig (bool): If True plot will be saved as png file in the cwd - - Returns: - plot (matplotlib.pyplot): plot object - """ - num_incoming_weights = np.sum(np.array(weights) > 0, axis=0) - - plt.figure(figsize=(12, 5)) - plt.xlabel("Number of connections") - plt.ylabel("Probability") - - # Fit a normal distribution to the data - mu, std = norm.fit(num_incoming_weights) - plt.hist(num_incoming_weights, bins=bin_size, density=True, alpha=0.6, color='b') - - # PDF - xmin, xmax = plt.xlim() - x = np.linspace(xmin, xmax, max(num_incoming_weights)) - p = norm.pdf(x, mu, std) - plt.plot(x, p, 'k', linewidth=2) - title = "Distribution of presynaptic connections: mu = %.2f, std = %.2f" % (mu, std) - plt.title(title) - - if 
savefig: - plt.savefig("hist_incoming_conn") - - return plt.show() - - - @staticmethod - def hist_outgoing_conn( - weights: np.array, bin_size: int, histtype: str, savefig: bool - ): - """Plot the histogram of number of outgoing connections per neuron - - Args: - weights (array): Connection weights - - bin_size (int): Histogram bin size - - histtype (str): Same as histtype matplotlib - - savefig (bool): If True plot will be saved as png file in the cwd - - Returns: - plot object """ - - # Plot the histogram of the distribution of number of outgoing connections in the network - - num_outgoing_weights = np.sum(np.array(weights) > 0, axis=1) - - plt.figure(figsize=(12, 5)) - plt.xlabel("Number of connections") - plt.ylabel("Probability") - - # Fit a normal distribution to the data - mu, std = norm.fit(num_outgoing_weights) - plt.hist(num_outgoing_weights, bins=bin_size, density=True, alpha=0.6, color='b') - - # PDF - xmin, xmax = plt.xlim() - x = np.linspace(xmin, xmax, max(num_outgoing_weights)) - p = norm.pdf(x, mu, std) - plt.plot(x, p, 'k', linewidth=2) - title = "Distribution of post synaptic connections: mu = %.2f, std = %.2f" % (mu, std) - plt.title(title) - - if savefig: - plt.savefig("hist_outgoing_conn") - - return plt.show() - - @staticmethod - def network_connection_dynamics( - connection_counts: np.array, savefig: bool - ): - """Plot the number of positive connections in the excitatory pool - - Args: - connection_counts (array) - 1D Array of number of connections in the network per time step - - savefig (bool) - If True plot will be saved as png file in the cwd - - Returns: - plot object - """ - - # Plot graph for entire simulation time period - _, ax1 = plt.subplots(figsize=(12, 5)) - ax1.plot(connection_counts, label="Connection dynamics") - plt.margins(x=0) - ax1.set_xticks(ax1.get_xticks()[::2]) - - ax1.set_title("Network connection dynamics") - plt.ylabel("Number of active connections") - plt.xlabel("Time step") - plt.legend(loc="upper right") - plt.tight_layout() - - if savefig: - plt.savefig("connection_dynamics") - - return plt.show() - - @staticmethod - def hist_firing_rate_network(spike_train: np.array, bin_size: int, savefig: bool): - - """ Plot the histogram of firing rate (total number of neurons spiking at each time step) - - Args: - spike_train (array): Array of spike trains - - bin_size (int): Histogram bin size - - savefig (bool): If True, plot will be saved in the cwd - - Returns: - plot object """ - - fr = np.count_nonzero(spike_train.tolist(), 1) - - # Filter zero entries in firing rate list above - fr = list(filter(lambda a: a != 0, fr)) - plt.title("Distribution of population activity without inactive time steps") - plt.xlabel("Spikes/time step") - plt.ylabel("Count") - - plt.hist(fr, bin_size) - - if savefig: - plt.savefig("hist_firing_rate_network.png") - - return plt.show() - - @staticmethod - def scatter_plot(spike_train: np.array, savefig: bool): - - """Scatter plot of spike trains - - Args: - spike_train (array): Array of spike trains - - savefig (bool): If True, plot will be saved in the cwd - - Returns: - plot object""" - - # Convert the list of spike trains into an array - spike_train = np.asarray(spike_train) - # Get the indices where spike_train is 1 - x, y = np.argwhere(spike_train.T == 1).T - - plt.figure(figsize=(8, 5)) - - firing_rates = Statistics.firing_rate_network(spike_train).tolist() - plt.plot(firing_rates, label="Firing rate") - plt.legend(loc="upper left") - -
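# Reading the scatter below: np.argwhere(spike_train.T == 1).T (computed
# above) returns two aligned index arrays, x = neuron ids and y = time
# steps, so time runs along the horizontal axis and neurons along the
# vertical axis.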
plt.scatter(y, x, s=0.1, color="black") - plt.title('Spike Trains') - plt.xlabel("Time step") - plt.ylabel("Neuron") - plt.legend(loc="upper left") - - if savefig: - plt.savefig("ScatterSpikeTrain.png") - return plt.show() - - @staticmethod - def raster_plot(spike_train: np.array, savefig: bool): - - """Raster plot of spike trains - - Args: - spike_train (array): Array of spike trains - - savefig (bool): If True, plot will be saved in the cwd - - Returns: - plot object""" - - # Convert the list of spike trains into an array - spike_train = np.asarray(spike_train) - - plt.figure(figsize=(11, 6)) - - firing_rates = Statistics.firing_rate_network(spike_train).tolist() - plt.plot(firing_rates, label="Firing rate") - plt.legend(loc="upper left") - plt.title('Spike Trains') - # Get the indices where spike_train is 1 - x, y = np.argwhere(spike_train.T == 1).T - - plt.plot(y, x, "|r") - plt.xlabel("Time step") - plt.ylabel("Neuron") - - if savefig: - plt.savefig("RasterSpikeTrain.png") - return plt.show() - - @staticmethod - def correlation(corr: np.array, savefig: bool): - - """Plot correlation between neurons - - Args: - corr (array): Correlation matrix - - savefig (bool): If true will save the plot at the current working directory - - Returns: - matplotlib.pyplot: Neuron Correlation plot - """ - - # Generate a mask for the upper triangle - mask = np.zeros_like(corr, dtype=bool) - mask[np.triu_indices_from(mask)] = True - - f, ax = plt.subplots(figsize=(11, 9)) - - # Custom diverging colormap - cmap = sns.diverging_palette(220, 10, as_cmap=True) - - sns.heatmap( - corr, - mask=mask, - cmap=cmap, - xticklabels=5, - yticklabels=5, - vmax=0.1, - center=0, - square=False, - linewidths=0.0, - cbar_kws={"shrink": 0.9}, - ) - - if savefig: - plt.savefig("Correlation between neurons") - return None - - @staticmethod - def isi_exponential_fit( - spike_train: np.array, neuron: int, bin_size: int, savefig: bool - ): - - """Plot Exponential fit on the inter-spike intervals during training or simulation phase - - Args: - spike_train (array): Array of spike trains - - neuron (int): Target neuron - - bin_size (int): Inter-spike intervals will be binned into bin_size bins - - savefig (bool): If True, plot will be saved in the cwd - - Returns: - plot object""" - - - isi = Statistics.spike_time_intervals(spike_train[:,neuron]) - - y, x = np.histogram(sorted(isi), bins=bin_size) - - x = [int(i) for i in x] - y = [float(i) for i in y] - - def exponential_func(y, a, b, c): - return a * np.exp(-b * np.array(y)) - c - - # Curve fit - popt, _ = curve_fit(exponential_func, x[1:bin_size], y[1:bin_size]) - - plt.plot( - x[1:bin_size], - exponential_func(x[1:bin_size], *popt), - label="Exponential fit", - ) - plt.title('Distribution of Inter Spike Intervals and Exponential Curve Fit') - plt.scatter(x[1:bin_size], y[1:bin_size], s=2.0, color="black", label="ISI") - plt.xlabel("ISI") - plt.ylabel("Frequency") - plt.legend() - - if savefig: - plt.savefig("isi_exponential_fit") - return plt.show() - - @staticmethod - def weight_distribution(weights: np.array, bin_size: int, savefig: bool): - - """Plot the distribution of synaptic weights - - Args: - weights (array): Connection weights - - bin_size (int): Weights will be split into bins of size bin_size - - savefig (bool): If True, plot will be saved in the cwd - - Returns: - plot object""" - - weights = weights[ - weights >= 0.01 - ] # Remove the weight values less than 0.01 # As reported in
article SORN 2013 - y, x = np.histogram(weights, bins=bin_size) # Create histogram with bin_size - plt.title('Synaptic weight distribution') - plt.scatter(x[:-1], y, s=2.0, c="black") - plt.xlabel("Connection strength") - plt.ylabel("Frequency") - - if savefig: - plt.savefig("weight_distribution") - - return plt.show() - - @staticmethod - def linear_lognormal_fit(weights: np.array, num_points: int, savefig: bool): - - """Lognormal curve fit on connection weight distribution - - Args: - weights (array): Connection weights - - num_points(int): Number of points to be plotted in the x axis - - savefig(bool): If True, plot will be saved in the cwd - - Returns: - plot object""" - - weights = np.array(weights.tolist()) - weights = weights[weights >= 0.01] - - M = float(np.mean(weights)) # Geometric mean - s = float(np.std(weights)) # Geometric standard deviation - - # Lognormal distribution parameters - - mu = float(np.mean(np.log(weights))) # Mean of log(X) - sigma = float(np.std(np.log(weights))) # Standard deviation of log(X) - shape = sigma # Scipy's shape parameter - scale = np.exp(mu) # Scipy's scale parameter - median = np.exp(mu) - - mode = np.exp(mu - sigma ** 2) # Note that mode depends on both M and s - mean = np.exp(mu + (sigma ** 2 / 2)) # Note that mean depends on both M and s - x = np.linspace( - np.min(weights), np.max(weights), num=num_points - ) - - pdf = stats.lognorm.pdf( - x, shape, loc=0, scale=scale - ) - - plt.figure(figsize=(12, 4.5)) - plt.title('Curve fit on connection weight distribution') - # Figure on linear scale - plt.subplot(121) - plt.plot(x, pdf) - - plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode") - plt.vlines( - mean, - 0, - stats.lognorm.pdf(mean, shape, loc=0, scale=scale), - linestyle="--", - color="green", - label="Mean", - ) - plt.vlines( - median, - 0, - stats.lognorm.pdf(median, shape, loc=0, scale=scale), - color="blue", - label="Median", - ) - plt.ylim(ymin=0) - plt.xlabel("Weight") - plt.title("Linear scale") - plt.legend() - - # Figure on logarithmic scale - plt.subplot(122) - plt.semilogx(x, pdf) - - plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode") - plt.vlines( - mean, - 0, - stats.lognorm.pdf(mean, shape, loc=0, scale=scale), - linestyle="--", - color="green", - label="Mean", - ) - plt.vlines( - median, - 0, - stats.lognorm.pdf(median, shape, loc=0, scale=scale), - color="blue", - label="Median", - ) - plt.ylim(ymin=0) - plt.xlabel("Weight") - plt.title("Logarithmic scale") - plt.legend() - - if savefig: - plt.savefig("LinearLognormalFit") - - return plt.show() - - @staticmethod - def plot_network(corr: np.array, corr_thres: float, fig_name: str = None): - - """Network x graphical visualization of the network using the correlation matrix - - Args: - corr (array): Correlation between neurons - - corr_thres (array): Threshold to prune the connection. Smaller the threshold, - higher the density of connections - - fig_name (array, optional): Name of the figure. Defaults to None. 
-    @staticmethod
-    def plot_network(corr: np.array, corr_thres: float, fig_name: str = None):
-
-        """NetworkX graphical visualization of the network using the correlation matrix
-
-        Args:
-            corr (array): Correlation between neurons
-
-            corr_thres (float): Threshold to prune the connections. The smaller the threshold,
-                                the higher the density of connections
-
-            fig_name (str, optional): Name of the figure. Defaults to None.
-
-        Returns:
-            matplotlib.pyplot: Plot instance
-        """
-
-        df = pd.DataFrame(corr)
-
-        links = df.stack().reset_index()
-        links.columns = ["var1", "var2", "value"]
-        links_filtered = links.loc[
-            (links["value"] > corr_thres) & (links["var1"] != links["var2"])
-        ]
-
-        G = nx.from_pandas_edgelist(links_filtered, "var1", "var2")
-
-        plt.figure(figsize=(50, 50))
-        nx.draw(
-            G,
-            with_labels=True,
-            node_color="orange",
-            node_size=50,
-            linewidths=5,
-            font_size=10,
-        )
-        plt.text(0.1, 0.9, "%s" % corr_thres)
-        if fig_name:  # Guard against saving a file literally named "None"
-            plt.savefig("%s" % fig_name)
-        return plt.show()
-
-    @staticmethod
-    def hamming_distance(hamming_dist: list, savefig: bool):
-
-        """Hamming distance between true network states and perturbed network states
-
-        Args:
-            hamming_dist (list): Hamming distance values
-
-            savefig (bool): If True, save the fig at the current working directory
-
-        Returns:
-            matplotlib.pyplot: Hamming distance between true and perturbed network states
-        """
-
-        plt.figure(figsize=(15, 6))
-        plt.title("Hamming distance between actual and perturbed states")
-        plt.xlabel("Time steps")
-        plt.ylabel("Hamming distance")
-        plt.plot(hamming_dist)
-
-        if savefig:
-            plt.savefig("HammingDistance.png")
-
-        return plt.show()
-
-
-class Statistics(object):
-    """ Wrapper class for statistical analysis methods """
-
-    def __init__(self):
-        pass
-
-    @staticmethod
-    def firing_rate_neuron(spike_train: np.array, neuron: int, bin_size: int):
-
-        """Measure the spike rate of a given neuron within a given time window
-
-        Args:
-            spike_train (array): Array of spike trains
-
-            neuron (int): Target neuron in the reservoir
-
-            bin_size (int): Divide the spike trains into bins of size bin_size
-
-        Returns:
-            tuple: time_period, bin_size and the spike rate of the neuron"""
-
-        time_period = len(spike_train[:, 0])
-
-        neuron_spike_train = spike_train[:, neuron]
-
-        # Split the neuron's spike train into sub-lists of length bin_size
-        samples_spike_train = [
-            neuron_spike_train[i : i + bin_size]
-            for i in range(0, len(neuron_spike_train), bin_size)
-        ]
-
-        spike_rate = 0.0
-
-        for sample in samples_spike_train:  # Renamed to avoid shadowing `spike_train`
-            spike_rate += list(sample).count(1.0)
-
-        spike_rate = spike_rate * bin_size / time_period
-
-        return time_period, bin_size, spike_rate
-
-    @staticmethod
-    def firing_rate_network(spike_train: np.array):
-
-        """Count the number of neurons spiking at each time step, i.e. the firing rate of the network
-
-        Args:
-            spike_train (array): Array of spike trains
-
-        Returns:
-            array: firing_rate"""
-
-        firing_rate = np.count_nonzero(spike_train.tolist(), 1)
-
-        return firing_rate
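A short illustrative sketch of the two firing-rate helpers above (assuming `Statistics` is importable from `sorn.utils`; the data is fabricated):

```python
import numpy as np
from sorn.utils import Statistics  # assumed import path

spike_train = (np.random.rand(600, 200) < 0.1).astype(float)  # 600 steps, 200 neurons

_, _, rate = Statistics.firing_rate_neuron(spike_train, neuron=0, bin_size=60)
network_rate = Statistics.firing_rate_network(spike_train)

print(rate)                 # average spikes per 60-step bin, ~6 here
print(network_rate.mean())  # ~20 of 200 neurons active per step
```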
-    @staticmethod
-    def scale_dependent_smoothness_measure(firing_rates: list):
-
-        """Smoothness measure that depends on the scale of the series; smaller values correspond to a smoother series
-
-        Args:
-            firing_rates (list): List of number of active neurons per time step
-
-        Returns:
-            sd_diff (float): Signifies the smoothness of the semantic changes in firing rates
-        """
-
-        diff = np.diff(firing_rates)
-        sd_diff = np.std(diff)
-
-        return sd_diff
-
-    @staticmethod
-    def scale_independent_smoothness_measure(firing_rates: list):
-
-        """Smoothness measure independent of the scale of the series; smaller values correspond to a smoother series
-
-        Args:
-            firing_rates (list): List of number of active neurons per time step
-
-        Returns:
-            coeff_var (float): Signifies the smoothness of the semantic changes in firing rates"""
-
-        diff = np.diff(firing_rates)
-        mean_diff = np.mean(diff)
-        sd_diff = np.std(diff)
-
-        coeff_var = sd_diff / abs(mean_diff)
-
-        return coeff_var
-
-    @staticmethod
-    def autocorr(firing_rates: list, t: int = 2):
-        """Autocorrelation of the firing-rate series
-
-        Score interpretation:
-        - scores near 1 imply a smoothly varying series
-
-        - scores near 0 imply that there is no overall linear relationship between a data point and the following one
-
-        - scores near -1 suggest that the series is jagged in a particular way: if one point is above the mean, the next is likely to be below the mean by about the same amount, and vice versa.
-
-        Args:
-            firing_rates (list): Firing rates of the network
-
-            t (int, optional): Window size. Defaults to 2.
-
-        Returns:
-            array: Autocorrelation between neurons given their firing rates
-        """
-
-        return np.corrcoef(
-            np.array(
-                [
-                    firing_rates[0 : len(firing_rates) - t],
-                    firing_rates[t : len(firing_rates)],
-                ]
-            )
-        )
-
-    @staticmethod
-    def avg_corr_coeff(spike_train: np.array):
-
-        """Measure the average Pearson correlation coefficient between neurons
-
-        Args:
-            spike_train (array): Neural activity
-
-        Returns:
-            array: Average correlation coefficient"""
-
-        corr_mat = np.corrcoef(np.asarray(spike_train).T)
-        n = corr_mat.shape[0]  # Network size; was hard-coded to 200 (the default Ne)
-        avg_corr = np.sum(corr_mat, axis=1) / n
-        # Collapse to a single scalar; the upper and lower halves of the
-        # correlation matrix are symmetric
-        corr_coeff = avg_corr.sum() / n
-
-        return corr_mat, corr_coeff
-
-    @staticmethod
-    def spike_times(spike_train: np.array):
-
-        """Get the time instants at which neurons spike
-
-        Args:
-            spike_train (array): Spike trains of neurons
-
-        Returns:
-            (array): Spike time of each neuron in the pool"""
-
-        times = np.where(spike_train == 1.0)
-        return times
-
-    @staticmethod
-    def spike_time_intervals(spike_train):
-
-        """Generate inter-spike intervals from spike trains
-
-        Args:
-            spike_train (array): Network activity
-
-        Returns:
-            list: Inter-spike intervals for each neuron in the reservoir
-        """
-
-        spike_times = Statistics.spike_times(spike_train)
-        isi = np.diff(spike_times[-1])
-        return isi
-
-    @staticmethod
-    def hamming_distance(actual_spike_train: np.array, perturbed_spike_train: np.array):
-        """Hamming distance between true network states and perturbed network states
-
-        Args:
-            actual_spike_train (np.array): True network's states
-
-            perturbed_spike_train (np.array): Perturbed network's states
-
-        Returns:
-            list: Hamming distance between true and perturbed network states at each time step
-        """
-        hd = [
-            np.count_nonzero(actual_spike_train[i] != perturbed_spike_train[i])
-            for i in range(len(actual_spike_train))
-        ]
-        return hd
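An illustrative end-to-end use of the Hamming distance, combined with the plotting helper defined earlier (assumed import paths; data fabricated):

```python
import numpy as np
from sorn.utils import Statistics, Plotter  # assumed import paths

actual = (np.random.rand(100, 200) < 0.1).astype(float)
perturbed = actual.copy()
perturbed[:, 0] = 1.0 - perturbed[:, 0]  # flip one neuron at every time step

hd = Statistics.hamming_distance(actual, perturbed)
print(hd[:5])  # [1, 1, 1, 1, 1] -- exactly one disagreeing neuron per step
Plotter.hamming_distance(hd, savefig=False)
```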
-    @staticmethod
-    def fanofactor(spike_train: np.array, neuron: int, window_size: int):
-
-        """Investigate whether neuronal spike generation is a Poisson process
-
-        Args:
-            spike_train (np.array): Spike train of neurons in the reservoir
-
-            neuron (int): Target neuron in the pool
-
-            window_size (int): Number of equal time windows the spike train is split into
-                               (must evenly divide the number of time steps)
-
-        Returns:
-            float : Fano factor of the neuron's spike train
-        """
-
-        # Choose the activity of the target neuron
-        neuron_act = spike_train[:, neuron]
-
-        # Divide the observations into `window_size` equal time windows
-        tws = np.split(neuron_act, window_size)
-        fr = []
-        for i in range(len(tws)):
-            fr.append(np.count_nonzero(tws[i]))
-
-        mean_firing_rate = np.mean(fr)
-        variance_firing_rate = np.var(fr)
-
-        fano_factor = variance_firing_rate / mean_firing_rate
-
-        return mean_firing_rate, variance_firing_rate, fano_factor
-
-    @staticmethod
-    def spike_source_entropy(spike_train: np.array, num_neurons: int):
-
-        """Measure the uncertainty about the origin of a spike in the network using entropy
-
-        Args:
-            spike_train (np.array): Spike train of neurons
-
-            num_neurons (int): Number of neurons in the reservoir
-
-        Returns:
-            float : Spike source entropy of the network
-        """
-
-        # Number of spikes from each neuron during the interval
-        n_spikes = np.count_nonzero(spike_train, axis=0)
-
-        # Probability that each neuron generates the next spike
-        # Note: p_i must be non-zero; silent neurons would make the log undefined
-        p = n_spikes / np.count_nonzero(spike_train)
-
-        sse = np.sum([pi * np.log(pi) for pi in p]) / np.log(
-            1 / num_neurons
-        )  # Spike source entropy, normalized to [0, 1]
-
-        return sse
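Both statistics above have easy analytic anchors: Poisson counts give a Fano factor near 1, and uniform spiking drives the normalized spike source entropy to 1. An illustrative check (the entropy helper is re-implemented inline for the example):

```python
import numpy as np

rng = np.random.default_rng(0)

# Fano factor: Var/Mean of Poisson window counts is ~1
counts = rng.poisson(lam=4.0, size=10_000)
print(counts.var() / counts.mean())  # ~1.0

# Spike source entropy: 1.0 for uniform sources, small when one neuron dominates
def sse(p, n):
    return np.sum([pi * np.log(pi) for pi in p]) / np.log(1 / n)

print(sse(np.full(4, 0.25), 4))          # 1.0
print(sse([0.97, 0.01, 0.01, 0.01], 4))  # ~0.12
```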
diff --git a/dist/sorn-0.6.2-py3-none-any.whl b/dist/sorn-0.6.2-py3-none-any.whl
deleted file mode 100644
index 81a09a6..0000000
Binary files a/dist/sorn-0.6.2-py3-none-any.whl and /dev/null differ
diff --git a/dist/sorn-0.6.2.tar.gz b/dist/sorn-0.6.2.tar.gz
deleted file mode 100644
index fe9329a..0000000
--- a/dist/sorn-0.6.2.tar.gz
+++ /dev/null
Binary files a/dist/sorn-0.6.2.tar.gz and /dev/null differ
diff --git a/sorn.egg-info/PKG-INFO b/sorn.egg-info/PKG-INFO
deleted file mode 100644
index f4683ab..0000000
--- a/sorn.egg-info/PKG-INFO
+++ /dev/null
@@ -1,133 +0,0 @@
-Metadata-Version: 2.1
-Name: sorn
-Version: 0.6.2
-Summary: Self-Organizing Recurrent Neural Networks
-Home-page: https://github.com/Saran-nns/sorn
-Author: Saranraj Nambusubramaniyan
-Author-email: saran_nns@hotmail.com
-License: OSI Approved :: MIT License
-Description:
-        # Self-Organizing Recurrent Neural Networks
-
-        SORN is a class of neuro-inspired artificial networks built on the plasticity mechanisms of the biological brain, mimicking the neocortical circuits' ability to learn and adapt. SORN consists of a pool of excitatory neurons and a small population of inhibitory neurons, controlled by five plasticity mechanisms found in the neocortex: Spike-Timing-Dependent Plasticity (STDP), Intrinsic Plasticity (IP), Synaptic Scaling (SS), Synaptic Normalization (SN) and inhibitory Spike-Timing-Dependent Plasticity (iSTDP). Using mathematical tools, the SORN network simplifies the underlying structural and functional connectivity mechanisms responsible for learning and memory in the brain.
-
-        `sorn` is a Python package designed for Self-Organizing Recurrent Neural Networks. It provides a research environment for computational neuroscientists to study the self-organization, adaptation, learning, memory and behavior of brain circuits by reverse-engineering neural plasticity mechanisms. To further extend the potential applications of `sorn`, a demonstrative example of a neuro-robotics experiment using OpenAI Gym is also [documented](https://self-organizing-recurrent-neural-networks.readthedocs.io/en/latest/usage.html).
-
-        [![Build Status](https://github.com/saran-nns/sorn/workflows/Build/badge.svg)](https://github.com/saran-nns/sorn/actions)
-        [![codecov](https://codecov.io/gh/Saran-nns/sorn/branch/master/graph/badge.svg)](https://codecov.io/gh/Saran-nns/sorn)
-        [![Documentation Status](https://readthedocs.org/projects/self-organizing-recurrent-neural-networks/badge/?version=latest)](https://self-organizing-recurrent-neural-networks.readthedocs.io/en/latest/?badge=latest)
-        [![PyPI version](https://badge.fury.io/py/sorn.svg)](https://badge.fury.io/py/sorn)
-        [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
-        [![Downloads](https://pepy.tech/badge/sorn)](https://pepy.tech/project/sorn)
-        [![DOI](https://zenodo.org/badge/174756058.svg)](https://zenodo.org/badge/latestdoi/174756058)
-        [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-        [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/164AKTA-iCVLq-iR-treLA_Y9keRYrQkH#scrollTo=Rt2YZptMtC14)
-        [![status](https://joss.theoj.org/papers/7dc447f7a0d17d774b59c8ee15c223c2/status.svg)](https://joss.theoj.org/papers/7dc447f7a0d17d774b59c8ee15c223c2)
-        SORN Reservoir and the evolution of synaptic efficacies
-
-        ## Installation
-
-        ```bash
-        pip install sorn
-        ```
-
-        The library is still in alpha stage, so you may also want to install the latest version from the development branch:
-
-        ```bash
-        pip install git+https://github.com/Saran-nns/sorn
-        ```
-
-        ## Dependencies
-        SORN supports Python 3.5+ only. To install all optional dependencies,
-
-        ```bash
-        pip install 'sorn[all]'
-        ```
-
-        ## Usage
-        ### Plasticity Phase
-
-        ```python
-        import sorn
-        from sorn import Simulator
-        import numpy as np
-
-        # Sample input
-        num_features = 10
-        time_steps = 200
-        inputs = np.random.rand(num_features, time_steps)
-
-        # Simulate the network with default hyperparameters under gaussian white noise
-        state_dict, E, I, R, C = Simulator.simulate_sorn(inputs=inputs, phase='plasticity',
-                                                         matrices=None, noise=True,
-                                                         time_steps=time_steps)
-        ```
-        ```
-        Network Initialized
-        Number of connections in Wee 3909 , Wei 1574, Wie 8000
-        Shapes Wee (200, 200) Wei (40, 200) Wie (200, 40)
-        ```
-        ### Training Phase
-        ```python
-        from sorn import Trainer
-        # NOTE: During the training phase, the input to `sorn` should have its second (time) dimension set to 1, i.e. the input shape should be (input_features, 1).
-
-        inputs = np.random.rand(num_features, 1)
-
-        # The SORN network is frozen during the training phase
-        state_dict, E, I, R, C = Trainer.train_sorn(inputs=inputs, phase='training',
                                                     matrices=state_dict, noise=False,
-                                                    time_steps=1,
-                                                    ne=100, nu=num_features,
-                                                    lambda_ee=10, eta_stdp=0.001)
-        ```
-        ### Network Output Descriptions
-        `state_dict` - Dictionary of connection weights (`Wee`, `Wei`, `Wie`), excitatory network activity (`X`), inhibitory network activity (`Y`) and threshold values (`Te`, `Ti`)
-
-        `E` - Excitatory network activity over the entire simulation period
-
-        `I` - Inhibitory network activity over the entire simulation period
-
-        `R` - Recurrent network activity over the entire simulation period
-
-        `C` - Number of active connections in the excitatory pool at each time step
-
-        ### Documentation
-        For detailed documentation about development, analysis, plotting methods and a sample experiment with OpenAI Gym, please visit the [SORN Documentation](https://self-organizing-recurrent-neural-networks.readthedocs.io/en/latest/)
-
-        ### Citation
-
-        ```bibtex
-        @software{saranraj_nambusubramaniyan_2020_4184103,
-          author    = {Saranraj Nambusubramaniyan},
-          title     = {Saran-nns/sorn: Stable alpha release},
-          month     = nov,
-          year      = 2020,
-          publisher = {Zenodo},
-          version   = {v0.3.1},
-          doi       = {10.5281/zenodo.4184103},
-          url       = {https://doi.org/10.5281/zenodo.4184103}
-        }
-        ```
-
-        ### Contributions
-        Contributions are welcome. If you wish to contribute, please create a branch and open a pull request; the changes can be discussed there.
-        If you find a bug in the code or errors in the documentation, please open a new issue in the GitHub repository and report it. Please provide sufficient information for the bug to be reproduced.
-
-
-Keywords: Brain-Inspired Computing,Artificial Neural Networks,Neuro Informatics,Spiking Cortical Networks,Neural Connectomics,Neuroscience,Artificial General Intelligence,Neural Information Processing
-Platform: UNKNOWN
-Classifier: Development Status :: 3 - Alpha
-Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Description-Content-Type: text/markdown
diff --git a/sorn.egg-info/SOURCES.txt b/sorn.egg-info/SOURCES.txt
deleted file mode 100644
index 5857bdb..0000000
--- a/sorn.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-LICENSE.md
-README.md
-setup.py
-sorn/__init__.py
-sorn/sorn.py
-sorn/test_sorn.py
-sorn/utils.py
-sorn.egg-info/PKG-INFO
-sorn.egg-info/SOURCES.txt
-sorn.egg-info/dependency_links.txt
-sorn.egg-info/not-zip-safe
-sorn.egg-info/requires.txt
-sorn.egg-info/top_level.txt
\ No newline at end of file
diff --git a/sorn.egg-info/dependency_links.txt b/sorn.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/sorn.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/sorn.egg-info/not-zip-safe b/sorn.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/sorn.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/sorn.egg-info/requires.txt b/sorn.egg-info/requires.txt
deleted file mode 100644
index f64786e..0000000
--- a/sorn.egg-info/requires.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-numpy
-configparser
-scipy
-seaborn
-pandas
-networkx
diff --git a/sorn.egg-info/top_level.txt b/sorn.egg-info/top_level.txt
deleted file mode 100644
index c5bfb78..0000000
--- a/sorn.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-sorn