diff --git a/README.md b/README.md
index b293117..225e64a 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ The network is developed as part of my Master thesis at Universität Osnabrück,
[![Join the chat at https://gitter.im/Self-Organizing-Recurrent-Neural-Networks](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Self-Organizing-Recurrent-Neural-Networks?utm_source=badge&utm_medium=badge&utm_content=badge)
[![PyPI version](https://badge.fury.io/py/sorn.svg)](https://badge.fury.io/py/sorn)
![PyPI - Downloads](https://img.shields.io/pypi/dw/sorn.svg)
-[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2590449.svg)](https://doi.org/10.5281/zenodo.2590449)
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4174137.svg)](https://doi.org/10.5281/zenodo.4174137)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
SORN Reservoir and the evolution of synaptic efficacies
diff --git a/build/lib/sorn/__init__.py b/build/lib/sorn/__init__.py
index e69de29..9df5ae9 100644
--- a/build/lib/sorn/__init__.py
+++ b/build/lib/sorn/__init__.py
@@ -0,0 +1,7 @@
+from . import sorn
+import logging
+
+__author__ = "Saranraj Nambusubramaniyan"
+__version__ = "0.3.1"
+
+logging.basicConfig(level=logging.INFO)
\ No newline at end of file
diff --git a/build/lib/sorn/sorn.py b/build/lib/sorn/sorn.py
index 04a0658..fe9ef16 100644
--- a/build/lib/sorn/sorn.py
+++ b/build/lib/sorn/sorn.py
@@ -4,6 +4,7 @@
import numpy as np
import os
from sorn.utils import Initializer
+
from configparser import ConfigParser
import random
import tqdm
@@ -257,11 +258,11 @@ def structural_plasticity(wee):
indexes = Initializer.get_unconnected_indexes(wee)
- # Choose any idx randomly
- idx_rand = random.choice(indexes)
-
- if idx_rand[0] == idx_rand[1]:
+ # Choose any idx randomly such that i!=j
+ while True:
idx_rand = random.choice(indexes)
+ if idx_rand[0] != idx_rand[1]:
+ break
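+ # Seed the new synapse with a small weight; STDP can later strengthen or prune it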
wee[idx_rand[0]][idx_rand[1]] = 0.001
@@ -325,14 +326,13 @@ def reorganize_network():
return NotImplementedError
-
class MatrixCollection(Sorn):
def __init__(self, phase, matrices=None):
super().__init__()
self.phase = phase
self.matrices = matrices
- if self.phase == 'Plasticity' and self.matrices == None:
+ if self.phase == 'plasticity' and self.matrices is None:
self.time_steps = Sorn.time_steps + 1 # Total training steps
self.Wee, self.Wei, self.Wie, self.Te, self.Ti, self.X, self.Y = [0] * self.time_steps, [
@@ -353,7 +353,7 @@ def __init__(self, phase, matrices=None):
self.X[0] = x
self.Y[0] = y
- elif self.phase == 'Plasticity' and self.matrices != None:
+ elif self.phase == 'plasticity' and self.matrices is not None:
self.time_steps = Sorn.time_steps + 1 # Total training steps
self.Wee, self.Wei, self.Wie, self.Te, self.Ti, self.X, self.Y = [0] * self.time_steps, [
@@ -372,7 +372,7 @@ def __init__(self, phase, matrices=None):
self.X[0] = matrices['X']
self.Y[0] = matrices['Y']
- elif self.phase == 'Training':
+ elif self.phase == 'training':
"""NOTE:
time_steps here is diferent for plasticity and training phase"""
@@ -425,18 +425,22 @@ def network_activity_t_1(self, x, y, i):
return x_1, y_1
-
class NetworkState(Plasticity):
+
"""The evolution of network states"""
-
+
def __init__(self, v_t):
super().__init__()
self.v_t = v_t
-
+ # Check the input feature size; zero-pad so that only the first nu excitatory units receive external drive
+ assert Sorn.nu == len(self.v_t), "Input units and input size mismatch: {} != {}".format(Sorn.nu, len(self.v_t))
+ if Sorn.nu != Sorn.ne:
+ self.v_t = list(self.v_t) + [0.] * (Sorn.ne - Sorn.nu)
+ self.v_t = np.expand_dims(self.v_t, 1)
+
def incoming_drive(self, weights, activity_vector):
-
# Broadcast weights with the activity vector, then sum over the presynaptic axis
-
incoming = weights * activity_vector
incoming = np.array(incoming.sum(axis=0))
return incoming
@@ -452,21 +456,17 @@ def excitatory_network_state(self, wee, wei, te, x, y, white_noise_e):
incoming_drive_e = np.expand_dims(self.incoming_drive(weights=wee, activity_vector=xt), 1)
incoming_drive_i = np.expand_dims(self.incoming_drive(weights=wei, activity_vector=yt), 1)
-
tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e + np.asarray(self.v_t) - te
"""Heaviside step function"""
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
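+ # Equivalently: x_j(t+1) = H( sum_i Wee[i,j]*x_i(t) - sum_i Wei[i,j]*y_i(t) + noise + v_j(t) - Te_j ), with H the Heaviside step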
-
xt_next = np.asarray(heaviside_step.copy())  # Copy kept only to give x(t+1) a descriptive name; costs extra memory
-
return xt_next
def inhibitory_network_state(self, wie, ti, x, white_noise_i):
# Activity of inhibitory neurons
-
wie = np.asarray(wie)
xt = x[:, 1]
xt = xt.reshape(Sorn.ne, 1)
@@ -474,7 +474,6 @@ def inhibitory_network_state(self, wie, ti, x, white_noise_i):
incoming_drive_e = np.expand_dims(self.incoming_drive(weights=wie, activity_vector=xt), 1)
tot_incoming_drive = incoming_drive_e + white_noise_i - ti
-
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
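+ # Equivalently: y_j(t+1) = H( sum_i Wie[i,j]*x_i(t) + noise - Ti_j ), with H the Heaviside step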
@@ -505,16 +504,31 @@ def recurrent_drive(self, wee, wei, te, x, y, white_noise_e):
# Simulate / Train SORN
-class RunSorn(Sorn):
+class Simulator_(Sorn):
- def __init__(self, phase, matrices, time_steps):
+ def __init__(self):
super().__init__()
+
+ def simulate_sorn(self, inputs: np.ndarray = None, phase: str = 'plasticity', matrices: dict = None, time_steps: int = 1, noise: bool = True, **kwargs):
+
+ assert phase in ('plasticity', 'training'), "Phase can be either 'plasticity' or 'training'"
+
self.time_steps = time_steps
Sorn.time_steps = time_steps
self.phase = phase
self.matrices = matrices
-
- def run_sorn(self, inp):
+
+ kwargs_ = ['ne', 'nu', 'network_type_ee', 'network_type_ei', 'network_type_ie',
+ 'lambda_ee','lambda_ei', 'lambda_ie',
+ 'eta_stdp','eta_inhib', 'eta_ip',
+ 'te_max', 'ti_max', 'ti_min', 'te_min', 'mu_ip','sigma_ip']
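+ # Override the configuration.ini defaults for any recognized keyword arguments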
+ for key, value in kwargs.items():
+ if key in kwargs_:
+ setattr(Sorn, key, value)
+ # assert Sorn.nu == len(inputs[:,0]),"Size mismatch: Input != Nu "
+ Sorn.ni = int(0.2 * Sorn.ne)  # Inhibitory pool is fixed at 20% of the excitatory pool
+
# Initialize/Get the weight, threshold matrices and activity vectors
matrix_collection = MatrixCollection(phase=self.phase, matrices=self.matrices)
@@ -527,26 +541,22 @@ def run_sorn(self, inp):
frac_pos_active_conn = []
# To get the last activation status of Exc and Inh neurons
-
for i in tqdm.tqdm(range(self.time_steps)):
""" Generate white noise"""
- white_noise_e = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ne)
- white_noise_i = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ni)
-
- # Generate inputs
- # inp_ = np.expand_dims(Initializer.generate_normal_inp(10), 1)
-
- network_state = NetworkState(inp) # Feed input and initialize network state
+ if noise:
+ white_noise_e = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ne)
+ white_noise_i = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ni)
+ else:
+ white_noise_e, white_noise_i = 0., 0.
+
+ network_state = NetworkState(inputs[:,i]) # Feed input and initialize network state
# Buffers to get the resulting x and y vectors at the current time step and update the master matrix
-
x_buffer, y_buffer = np.zeros((Sorn.ne, 2)), np.zeros((Sorn.ni, 2))
-
# TODO: Return te,ti values in next version # UNUSED
te_buffer, ti_buffer = np.zeros((Sorn.ne, 1)), np.zeros((Sorn.ni, 1))
# Get the matrices and rename them for ease of reading
-
Wee, Wei, Wie = matrix_collection.Wee, matrix_collection.Wei, matrix_collection.Wie
Te, Ti = matrix_collection.Te, matrix_collection.Ti
X, Y = matrix_collection.X, matrix_collection.Y
@@ -555,15 +565,12 @@ def run_sorn(self, inp):
frac_pos_active_conn.append((Wee[i] > 0.0).sum())
""" Recurrent drive"""
-
r = network_state.recurrent_drive(Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e)
"""Get excitatory states and inhibitory states given the weights and thresholds"""
-
# x(t+1), y(t+1)
excitatory_state_xt_buffer = network_state.excitatory_network_state(Wee[i], Wei[i], Te[i], X[i], Y[i],
white_noise_e)
-
inhibitory_state_yt_buffer = network_state.inhibitory_network_state(Wie[i], Ti[i], X[i], white_noise_i)
""" Update X and Y """
@@ -574,7 +581,6 @@ def run_sorn(self, inp):
y_buffer[:, 1] = inhibitory_state_yt_buffer.T
"""Plasticity phase"""
-
plasticity = Plasticity()
# TODO
@@ -615,43 +621,144 @@ def run_sorn(self, inp):
return plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn
-class Generator(object):
-
+class Trainer_(Sorn):
+
+ """Args:
+ inputs - one hot vector of inputs
+
+ Returns:
+ matrix_collection - collection of all weight matrices in dictionaries
+ """
+
def __init__(self):
+ super().__init__()
pass
+
+ def train_sorn(self, inputs: np.ndarray = None, phase: str = 'plasticity', matrices: dict = None, noise: bool = True, **kwargs):
+ """[summary]
- def get_initial_matrices(self):
-
- return Plasticity.initialize_plasticity()
-
-# SAMPLE USAGE
-
-"""
-# Start the Simulation step
-
-# Used only during linear output layer optimization: During simulation, use input generator from utils
-
-_inputs = None # Can also simulate the network without inputs else pass the input values here
-
-# During first batch of training; Pass matrices as None:
-# SORN will initialize the matrices based on the configuration settings
-
-plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn = RunSorn(phase='Plasticity', matrices=None,
- time_steps=10000).run_sorn(_inputs)
+ Args:
+ inputs (np.ndarray, optional): input vector(s) for the current time step. Defaults to None.
+ phase (str, optional): 'plasticity' or 'training'. Defaults to 'plasticity'.
+ matrices (dict, optional): network matrices returned by a previous simulate/train call. Defaults to None.
+ noise (bool, optional): if True, add white Gaussian noise to the neuronal drive. Defaults to True.
-# Pickle the simulaion matrices for reuse
+ Returns:
+ tuple: (plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn)
+ """
+ assert phase in ('plasticity', 'training'), "Phase can be either 'plasticity' or 'training'"
+
+ kwargs_ = ['ne', 'nu', 'ni', 'network_type_ee', 'network_type_ei', 'network_type_ie',
+ 'lambda_ee','lambda_ei', 'lambda_ie',
+ 'eta_stdp','eta_inhib', 'eta_ip',
+ 'te_max', 'ti_max', 'ti_min', 'te_min', 'mu_ip','sigma_ip']
+ for key, value in kwargs.items():
+ if key in kwargs_:
+ setattr(Sorn, key, value)
+ Sorn.ni = int(0.2 * Sorn.ne)
+ # assert Sorn.nu == len(inputs[:,0]),"Size mismatch: Input != Nu "
+
+ self.phase = phase
+ self.matrices = matrices
+ self.time_steps = 1
+ Sorn.time_steps = 1
+ self.inputs = np.asarray(inputs)
+
+ # Collect the network activity at all time steps
+ X_all = [0]*self.time_steps
+ Y_all = [0]*self.time_steps
+ R_all = [0]*self.time_steps
+
+ frac_pos_active_conn = []
+
+ matrix_collection = MatrixCollection(phase = self.phase, matrices = self.matrices)
+
+ for i in range(1):  # train_sorn advances the network by a single time step per call
+
+ if noise:
+ white_noise_e = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ne)
+ white_noise_i = Initializer.white_gaussian_noise(mu=0., sigma=0.04, t=Sorn.ni)
+ else:
+ white_noise_e = 0.
+ white_noise_i = 0.
+
+ network_state = NetworkState(self.inputs) # Feed Input as an argument to the class
+
+ # Buffers to get the resulting x and y vectors at the current time step and update the master matrix
+ x_buffer, y_buffer = np.zeros((Sorn.ne, 2)), np.zeros((Sorn.ni, 2))
+ te_buffer, ti_buffer = np.zeros((Sorn.ne, 1)), np.zeros((Sorn.ni, 1))
-with open('stdp2013_10k.pkl', 'wb') as f:
- pickle.dump([plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn], f)
+ # Get the matrices and rename them for ease of reading
+ Wee, Wei, Wie = matrix_collection.Wee, matrix_collection.Wei, matrix_collection.Wie
+ Te, Ti = matrix_collection.Te, matrix_collection.Ti
+ X, Y = matrix_collection.X, matrix_collection.Y
+
+ """ Fraction of active connections between E-E network"""
+ frac_pos_active_conn.append((Wee[i] > 0.0).sum())
+
+ # Recurrent drive at t+1 used to predict the next external stimuli
+ r = network_state.recurrent_drive(Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e=white_noise_e)
-# While re simulate the network using any already simulated/ acquired matrices
+ """Get excitatory states and inhibitory states given the weights and thresholds"""
+ # x(t+1), y(t+1)
+ excitatory_state_xt_buffer = network_state.excitatory_network_state(Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e=white_noise_e)
+ inhibitory_state_yt_buffer = network_state.inhibitory_network_state(Wie[i], Ti[i], X[i], white_noise_i=white_noise_i)
+
+ """ Update X and Y """
+ x_buffer[:, 0] = X[i][:, 1] # xt -->xt_1
+ x_buffer[:, 1] = excitatory_state_xt_buffer.T # x_buffer --> xt
+ y_buffer[:, 0] = Y[i][:, 1]
+ y_buffer[:, 1] = inhibitory_state_yt_buffer.T
+
+ if self.phase == 'plasticity':
+ plasticity = Plasticity()
+
+ # STDP on E-E connections
+ Wee_t = plasticity.stdp(Wee[i], x_buffer, cutoff_weights=(0.0, 1.0))
+
+ # Intrinsic plasticity of excitatory thresholds
+ Te_t = plasticity.ip(Te[i], x_buffer)
+
+ # Structural plasticity
+ Wee_t = plasticity.structural_plasticity(Wee_t)
+
+ # iSTDP on inhibitory-to-excitatory connections
+ Wei_t = plasticity.istdp(Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0))
+
+ # Synaptic scaling Wee
+ Wee_t = plasticity.ss(Wee_t)
+
+ # Synaptic scaling Wei
+ Wei_t = plasticity.ss(Wei_t)
+ else:
+ # Training phase: plasticity off; weights and thresholds stay frozen
+ Wee_t, Wei_t, Te_t = Wee[i], Wei[i], Te[i]
+
+ """Assign the matrices to the matrix collections"""
+ matrix_collection.weight_matrix(Wee_t, Wei_t, Wie[i], i)
+ matrix_collection.threshold_matrix(Te_t, Ti[i], i)
+ matrix_collection.network_activity_t(x_buffer, y_buffer, i)
+
+ X_all[i] = x_buffer[:,1]
+ Y_all[i] = y_buffer[:,1]
+ R_all[i] = r
+
+ plastic_matrices = {'Wee': matrix_collection.Wee[-1],
+ 'Wei': matrix_collection.Wei[-1],
+ 'Wie': matrix_collection.Wie[-1],
+ 'Te': matrix_collection.Te[-1],
+ 'Ti': matrix_collection.Ti[-1],
+ 'X': X[-1], 'Y': Y[-1]}
+
+ return plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn
-with open('stdp2013_10k.pkl', 'rb') as f:
- plastic_matrices, X_all, Y_all, R_all, frac_pos_active_conn = pickle.load(f)
+# Instantiate the Trainer and Simulator singletons at import time
+Trainer = Trainer_()
+Simulator = Simulator_()
-plastic_matrices1, X_all1, Y_all1, R_all1, frac_pos_active_conn1 = RunSorn(phase='Plasticity',
- matrices=plastic_matrices,
- time_steps=20000).run_sorn(inp=None)
-"""
diff --git a/dist/sorn-0.2.10-py3-none-any.whl b/dist/sorn-0.2.10-py3-none-any.whl
deleted file mode 100644
index 6732620..0000000
Binary files a/dist/sorn-0.2.10-py3-none-any.whl and /dev/null differ
diff --git a/dist/sorn-0.2.10.tar.gz b/dist/sorn-0.2.10.tar.gz
deleted file mode 100644
index ac666a4..0000000
Binary files a/dist/sorn-0.2.10.tar.gz and /dev/null differ
diff --git a/dist/sorn-0.3.1-py3-none-any.whl b/dist/sorn-0.3.1-py3-none-any.whl
new file mode 100644
index 0000000..6d20d0f
Binary files /dev/null and b/dist/sorn-0.3.1-py3-none-any.whl differ
diff --git a/dist/sorn-0.3.1.tar.gz b/dist/sorn-0.3.1.tar.gz
new file mode 100644
index 0000000..72ec5a3
Binary files /dev/null and b/dist/sorn-0.3.1.tar.gz differ
diff --git a/setup.py b/setup.py
index e71f2fd..49c1a8a 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ def read(fname):
setup(
name = "sorn",
- version = "0.2.10",
+ version = "0.3.1",
author = "Saranraj Nambusubramaniyan",
author_email = "saran_nns@hotmail.com",
description ="Self-Organizing Recurrent Neural Networks",
diff --git a/sorn.egg-info/PKG-INFO b/sorn.egg-info/PKG-INFO
index ab3b77e..2334581 100644
--- a/sorn.egg-info/PKG-INFO
+++ b/sorn.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: sorn
-Version: 0.2.10
+Version: 0.3.1
Summary: Self-Organizing Recurrent Neural Networks
Home-page: https://github.com/Saran-nns/sorn
Author: Saranraj Nambusubramaniyan
@@ -10,25 +10,20 @@ Description: ## Self-Organizing Recurrent Neural Networks
SORN is a class of neuro-inspired artificial networks built on plasticity mechanisms found in the biological brain; it mimics the neocortical circuits' ability to learn and adapt through neuroplasticity.
- For ease of maintanance, example use cases and the API(under developement) are moved to https://github.com/Saran-nns/PySORN_0.1
+ The network is developed as part of my Master thesis at Universität Osnabrück, Germany. For ease of maintenance, the notebooks, use cases and the API (under development) are moved to https://github.com/Saran-nns/PySORN_0.1
[![Build Status](https://travis-ci.org/Saran-nns/sorn.svg?branch=master)](https://travis-ci.org/Saran-nns/sorn)
[![codecov](https://codecov.io/gh/Saran-nns/sorn/branch/master/graph/badge.svg)](https://codecov.io/gh/Saran-nns/sorn)
[![Join the chat at https://gitter.im/Self-Organizing-Recurrent-Neural-Networks](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Self-Organizing-Recurrent-Neural-Networks?utm_source=badge&utm_medium=badge&utm_content=badge)
[![PyPI version](https://badge.fury.io/py/sorn.svg)](https://badge.fury.io/py/sorn)
![PyPI - Downloads](https://img.shields.io/pypi/dw/sorn.svg)
- [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2593681.svg)](https://doi.org/10.5281/zenodo.2593681)
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2590449.svg)](https://doi.org/10.5281/zenodo.2590449)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
- SORN Reservoir and the evolution of synaptic efficacies
-
+ SORN Reservoir and the evolution of synaptic efficacies
+
- Neural Connectome
-
-
-
-
- #### To install the latest release:
+ ### To install the latest release:
```python
pip install sorn
@@ -40,17 +35,16 @@ Description: ## Self-Organizing Recurrent Neural Networks
pip install git+https://github.com/Saran-nns/sorn
```
- #### Dependencies
+ ### Dependencies
SORN supports Python 3.5+ ONLY. For older Python versions please use the official Python client
- #### Usage:
-
- ##### Update Network configurations
-
- Navigate to home/conda/envs/ENVNAME/Lib/site-packages/sorn
+ ### Usage:
- or if you are unsure about the directory of sorn
+ #### Update Network configurations
+ There are two ways to update/configure the network parameters:
+ 1. Navigate to home/conda/envs/ENVNAME/Lib/site-packages/sorn
+ or, if you are unsure about the directory of ```sorn```,
Run
@@ -61,53 +55,61 @@ Description: ## Self-Organizing Recurrent Neural Networks
```
to find the location of the sorn package
- Then, update/edit the configuration.ini
+ Then, update/edit arguments in ```configuration.ini```
-
- ##### Plasticity Phase
+ 2. Pass the arguments with valid names (listed below). This will override the default values in ```configuration.ini```. The allowed ```kwargs``` are:
+ ```Python
+ kwargs_ = ['ne', 'nu', 'network_type_ee', 'network_type_ei', 'network_type_ie', 'lambda_ee','lambda_ei', 'lambda_ie', 'eta_stdp','eta_inhib', 'eta_ip', 'te_max', 'ti_max', 'ti_min', 'te_min', 'mu_ip','sigma_ip']
+ ```
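+ For instance, a minimal sketch of overriding a few defaults for one run (the values below are illustrative, not recommended settings):
+ ```Python
+ import numpy as np
+ from sorn.sorn import Simulator
+
+ inputs = np.random.rand(10, 100)  # 10 input features, 100 time steps
+ matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Simulator.simulate_sorn(inputs=inputs, phase='plasticity', matrices=None, time_steps=100, ne=100, nu=10, eta_stdp=0.003)
+ ```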
+ #### Simulation: Plasticity Phase
+ The default ```ne``` and ```nu``` values are overridden by passing them when calling the ```simulate_sorn``` method.
```Python
# Import
- from sorn.sorn import RunSorn
+ import numpy as np
+ from sorn.sorn import Simulator
# Sample input
- inputs = [0.]
+ num_features = 10
+ time_steps = 200
+ inputs = np.random.rand(num_features, time_steps)
# To simulate the network;
- matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = RunSorn(phase='Plasticity', matrices=None,
- time_steps=100).run_sorn(inputs)
+ matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Simulator.simulate_sorn(inputs=inputs, phase='plasticity', matrices=None, noise=True, time_steps=time_steps, ne=200, nu=num_features)
# To resume the simulation, load the matrices_dict from previous simulation;
- matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = RunSorn(phase='Plasticity', matrices=matrices_dict,
- time_steps=100).run_sorn(inputs)
+ matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Simulator.simulate_sorn(inputs=inputs, phase='plasticity', matrices=matrices_dict, noise=True, time_steps=time_steps, ne=200, nu=num_features)
```
- ##### Training phase:
+ #### Training phase:
```Python
- matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = RunSorn(phase='Training', matrices=matrices_dict,
- time_steps=100).run_sorn(inputs)
+ inputs = np.random.rand(num_features, 1)
+
+ # The SORN network is frozen during the training phase
+ matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Trainer.train_sorn(inputs=inputs, phase='training', matrices=matrices_dict, nu=num_features)
```
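+ Since ```train_sorn``` advances the network by one time step per call, train over a sequence by looping and feeding the returned ```matrices_dict``` back in (a sketch, assuming ```inputs``` of shape ```(num_features, n_steps)```):
+ ```Python
+ for t in range(inputs.shape[1]):
+     matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Trainer.train_sorn(inputs=inputs[:, t], phase='training', matrices=matrices_dict, nu=num_features)
+ ```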
- #### Network Output Descriptions:
- matrices_dict - Dictionary of connection weights ('Wee','Wei','Wie') , Excitatory network activity ('X'), Inhibitory network activities('Y'), Threshold values ('Te','Ti')
+ ### Network Output Descriptions:
+ ```matrices_dict``` - Dictionary of connection weights ('Wee','Wei','Wie'), excitatory network activity ('X'), inhibitory network activity ('Y'), and threshold values ('Te','Ti')
+
+ ```Exc_activity``` - Collection of Excitatory network activity of entire simulation period
- Exc_activity - Collection of Excitatory network activity of entire simulation period
+ ```Inh_activity``` - Collection of Inhibitory network activity of entire simulation period
- Inh_activitsy - Collection of Inhibitory network activity of entire simulation period
+ ```Rec_activity``` - Collection of Recurrent network activity of entire simulation period
- Rec_activity - Collection of Recurrent network activity of entire simulation period
+ ```num_active_connections``` - List of number of active connections in the Excitatory pool at each time step
- num_active_connections - List of number of active connections in the Excitatory pool at each time step
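+
+ For a quick sanity check on these outputs (a sketch, assuming the simulation call above with ```ne = 200```):
+ ```Python
+ import numpy as np
+ print(matrices_dict['Wee'].shape)      # (200, 200) E-E weight matrix
+ print(np.asarray(Exc_activity).shape)  # (time_steps, 200) binary unit activity
+ print(num_active_connections[-1])      # active E-E synapses at the last step
+ ```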
+ ### Sample use with OpenAI gym:
+ #### Cartpole balance problem
+ This example keeps the default network parameters.
- #### Sample use with OpenAI gym :
- ##### Cartpole balance problem
```python
# Imports
- import utils.InitHelper as initializer
- from sorn.sorn import Sorn, Plasticity, TrainSorn, TrainSornPlasticity
+ from sorn.sorn import Simulator, Trainer
+ import pickle
import gym
# Load the simulated network matrices
@@ -116,7 +118,6 @@ Description: ## Self-Organizing Recurrent Neural Networks
with open('simulation_matrices.pkl','rb') as f:
sim_matrices,excit_states,inhib_states,recur_states,num_reservoir_conn = pickle.load(f)
-
# Training parameters
NUM_EPISODES = 2e6
@@ -130,21 +131,15 @@ Description: ## Self-Organizing Recurrent Neural Networks
state = env.reset()[None,:]
# Play the episode
-
while True:
-
if EPISODE < NUM_PLASTICITY_EPISODE:
# Plasticity phase
- sim_matrices,excit_states,inhib_states,recur_states,num_reservoir_conn = TrainSornPlasticity.train_sorn(phase = 'Plasticity',
- matrices = sim_matrices,
- inputs = state)
+ sim_matrices, excit_states, inhib_states, recur_states, num_reservoir_conn = Simulator.simulate_sorn(inputs=state, phase='plasticity', matrices=sim_matrices, noise=False)
else:
# Training phase with frozen reservoir connectivity
- sim_matrices,excit_states,inhib_states,recur_states,num_reservoir_conn = TrainSorn.train_sorn(phase = 'Training',
- matrices = sim_matrices,
- inputs = state)
+ sim_matrices, excit_states, inhib_states, recur_states, num_reservoir_conn = Trainer.train_sorn(inputs=state, phase='training', matrices=sim_matrices, noise=False)
# Feed excit_states as input states to your RL algorithm; below is a placeholder for a simple policy-gradient agent
# Sample the policy w.r.t. the excitatory states and take an action in the environment
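+ # e.g., a hypothetical softmax policy over the excitatory states (illustrative only; policy_weights is not part of sorn):
+ # logits = np.asarray(excit_states).ravel() @ policy_weights   # policy_weights shape: (ne, n_actions)
+ # probs = np.exp(logits) / np.exp(logits).sum()
+ # action = np.random.choice(env.action_space.n, p=probs)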
@@ -157,8 +152,7 @@ Description: ## Self-Organizing Recurrent Neural Networks
break
```
-
- #### Sample Plotting functions
+ ### Sample Plotting functions
```Python
from sorn.utils import Plotter
@@ -172,7 +166,7 @@ Description: ## Self-Organizing Recurrent Neural Networks
Plotter.raster_plot(spike_train = np.asarray(Exc_activity), savefig=False)
```
- #### Sample Statistical analysis functions
+ ### Sample Statistical analysis functions
```Python
from sorn.utils import Statistics
@@ -183,7 +177,7 @@ Description: ## Self-Organizing Recurrent Neural Networks
Statistics.fanofactor(spike_train= np.asarray(Exc_activity),neuron = 10,window_size = 10)
```
- #### The network is inspired by folowing articles:
+ ### The network is inspired by the following articles:
Lazar, A. (2009). SORN: a Self-organizing Recurrent Neural Network. Frontiers in Computational Neuroscience, 3. https://doi.org/10.3389/neuro.10.023.2009