diff --git a/README.md b/README.md
index 4203b96..f20dd58 100644
--- a/README.md
+++ b/README.md
@@ -107,7 +107,7 @@ from sorn import Trainer
 inputs = np.random.rand(num_features,1)
 
 # SORN network is frozen during training phase
-matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Trainer.train_sorn(inputs = inputs, phase='Training', matrices=matrices_dict,_nu=num_features, time_steps=1)
+matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Trainer.train_sorn(inputs = inputs, phase='training', matrices=matrices_dict,_nu=num_features, time_steps=1)
 ```
 
 To turn off any plasticity mechanisms during simulation or training phase, you can use `freeze` argument.
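Note: the `freeze` argument referenced at the end of the README hunk accepts any subset of the plasticity keys checked inside `simulate_sorn`/`train_sorn` ('stdp', 'istdp', 'ip', 'sp', 'ss'). A minimal sketch of that call, mirroring the notebook cells added below (all variables as defined there):

```python
from sorn import Simulator
import numpy as np

num_features, time_steps = 10, 200
inputs = np.random.rand(num_features, time_steps)

# Simulate with intrinsic plasticity, structural plasticity and synaptic scaling switched off
matrices_dict, Exc_activity, Inh_activity, Rec_activity, num_active_connections = Simulator.simulate_sorn(
    inputs=inputs, phase='plasticity', matrices=None, noise=True,
    time_steps=time_steps, _ne=200, _nu=num_features, freeze=['ip', 'sp', 'ss'])
```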
diff --git a/Untitled.ipynb b/Untitled.ipynb
new file mode 100644
index 0000000..f0717fc
--- /dev/null
+++ b/Untitled.ipynb
@@ -0,0 +1,140 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sorn import Simulator\n",
+    "import numpy as np\n",
+    "\n",
+    "# Sample input\n",
+    "num_features = 10\n",
+    "time_steps = 200\n",
+    "inputs = np.random.rand(num_features,time_steps)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Network Initialized\n",
+      "Number of connections in Wee 3894 , Wei 1580, Wie 8000\n",
+      "Shapes Wee (200, 200) Wei (40, 200) Wie (200, 40)\n"
+     ]
+    }
+   ],
+   "source": [
+    "# To simulate the network;\n",
+    "matrices_dict, E, I, R, C = Simulator.simulate_sorn(inputs = inputs, phase='plasticity', \n",
+    "                                                    matrices=None, noise = True, \n",
+    "                                                    time_steps=time_steps,\n",
+    "                                                    _ne = 200, _nu=num_features)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "matrices_dict, E, I, R, C = Simulator.simulate_sorn(inputs = inputs, phase='plasticity', \n",
+    "                                                    matrices=matrices_dict, noise= True, \n",
+    "                                                    time_steps=time_steps,_ne = 200, \n",
+    "                                                    _nu=num_features)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sorn import Trainer\n",
+    "inputs = np.random.rand(num_features,1)\n",
+    "\n",
+    "matrices_dict, E, I, R, C = Trainer.train_sorn(inputs = inputs, phase='training', \n",
+    "                                               matrices=matrices_dict,_nu=num_features, \n",
+    "                                               time_steps=1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Network Initialized\n",
+      "Number of connections in Wee 3891 , Wei 1580, Wie 8000\n",
+      "Shapes Wee (200, 200) Wei (40, 200) Wie (200, 40)\n"
+     ]
+    }
+   ],
+   "source": [
+    "matrices_dict, E, I, R, C = Simulator.simulate_sorn(inputs = inputs, phase='plasticity', \n",
+    "                                                    matrices=None, noise = True, time_steps=1, \n",
+    "                                                    _ne = 200, _nu=num_features, \n",
+    "                                                    freeze=['ip'])\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Network Initialized\n",
+      "Number of connections in Wee 3900 , Wei 1582, Wie 8000\n",
+      "Shapes Wee (200, 200) Wei (40, 200) Wie (200, 40)\n"
+     ]
+    }
+   ],
+   "source": [
+    "matrices_dict, E, I, R, C = Simulator.simulate_sorn(inputs = inputs, phase='plasticity', \n",
+    "                                                    matrices=None, noise = True, time_steps=1, \n",
+    "                                                    _ne = 200, _nu=num_features, \n",
+    "                                                    freeze=['ip','sp','ss'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
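Note: condensed from the notebook above, the intended flow is to start a plasticity run with `matrices=None`, resume it by passing the returned `matrices_dict` back in, and then drive single-step training through `Trainer.train_sorn` with the lowercase `phase='training'` (a sketch using the notebook's own names):

```python
from sorn import Simulator, Trainer
import numpy as np

num_features, time_steps = 10, 200
inputs = np.random.rand(num_features, time_steps)

# Fresh network: matrices=None triggers initialization
matrices_dict, E, I, R, C = Simulator.simulate_sorn(
    inputs=inputs, phase='plasticity', matrices=None, noise=True,
    time_steps=time_steps, _ne=200, _nu=num_features)

# Resume the same network by feeding the returned matrices back in
matrices_dict, E, I, R, C = Simulator.simulate_sorn(
    inputs=inputs, phase='plasticity', matrices=matrices_dict, noise=True,
    time_steps=time_steps, _ne=200, _nu=num_features)

# Single-step training on a one-column input
inputs = np.random.rand(num_features, 1)
matrices_dict, E, I, R, C = Trainer.train_sorn(
    inputs=inputs, phase='training', matrices=matrices_dict,
    _nu=num_features, time_steps=1)
```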
-__version__ = "0.5.1" +__version__ = "0.5.2" logging.basicConfig(level=logging.INFO) diff --git a/build/lib/sorn/sorn.py b/build/lib/sorn/sorn.py index 67656ca..3d4edf7 100644 --- a/build/lib/sorn/sorn.py +++ b/build/lib/sorn/sorn.py @@ -287,7 +287,8 @@ def ip(self, te: np.array, x: np.array): return te_update - def ss(self, wee: np.array): + @staticmethod + def ss(wee: np.array): """Synaptic Scaling or Synaptic Normalization Args: @@ -853,7 +854,7 @@ def simulate_sorn( Sorn._time_steps = time_steps self.phase = phase self.matrices = matrices - self.freeze = freeze + self.freeze = [] if freeze == None else freeze kwargs_ = [ "_ne", @@ -916,7 +917,6 @@ def simulate_sorn( te_buffer, ti_buffer = np.zeros( (Sorn._ne, 1)), np.zeros((Sorn._ni, 1)) - # Get the matrices and rename them for ease of reading Wee, Wei, Wie = ( matrix_collection.Wee, matrix_collection.Wei, @@ -956,31 +956,31 @@ def simulate_sorn( # STDP if 'stdp' not in self.freeze: - Wee_t = plasticity.stdp( + Wee[i] = plasticity.stdp( Wee[i], x_buffer, cutoff_weights=(0.0, 1.0)) # Intrinsic plasticity if 'ip' not in self.freeze: - Te_t = plasticity.ip(Te[i], x_buffer) + Te[i] = plasticity.ip(Te[i], x_buffer) # Structural plasticity - if 'sp' not in freeze: - Wee_t = plasticity.structural_plasticity(Wee_t) + if 'sp' not in self.freeze: + Wee[i] = plasticity.structural_plasticity(Wee[i]) # iSTDP - if 'istdp' not in freeze: - Wei_t = plasticity.istdp( + if 'istdp' not in self.freeze: + Wei[i] = plasticity.istdp( Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0) ) # Synaptic scaling Wee if 'ss' not in self.freeze: - Wee_t = Plasticity().ss(Wee_t) - Wei_t = Plasticity().ss(Wei_t) + Wee[i] = plasticity.ss(Wee[i]) + Wei[i] = plasticity.ss(Wei[i]) # Assign the matrices to the matrix collections - matrix_collection.weight_matrix(Wee_t, Wei_t, Wie[i], i) - matrix_collection.threshold_matrix(Te_t, Ti[i], i) + matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i) + matrix_collection.threshold_matrix(Te[i], Ti[i], i) matrix_collection.network_activity_t(x_buffer, y_buffer, i) X_all[i] = x_buffer[:, 1] @@ -1099,8 +1099,8 @@ def train_sorn( self.time_steps = time_steps Sorn._time_steps = time_steps self.inputs = np.asarray(inputs) - self.freeze = freeze - # Collect the network activity at all time steps + self.freeze = [] if freeze == None else freeze + X_all = [0] * self.time_steps Y_all = [0] * self.time_steps R_all = [0] * self.time_steps @@ -1125,7 +1125,7 @@ def train_sorn( network_state = NetworkState( self.inputs[:, i] - ) # Feed Input as an argument to the class + ) # Buffers to get the resulting x and y vectors at the current time step and update the master matrix x_buffer, y_buffer = np.zeros( @@ -1133,7 +1133,6 @@ def train_sorn( te_buffer, ti_buffer = np.zeros( (Sorn._ne, 1)), np.zeros((Sorn._ni, 1)) - # Get the matrices and rename them for ease of reading Wee, Wei, Wie = ( matrix_collection.Wee, matrix_collection.Wei, @@ -1170,35 +1169,36 @@ def train_sorn( # STDP if 'stdp' not in self.freeze: - Wee_t = plasticity.stdp( + Wee[i] = plasticity.stdp( Wee[i], x_buffer, cutoff_weights=(0.0, 1.0)) # Intrinsic plasticity if 'ip' not in self.freeze: - Te_t = plasticity.ip(Te[i], x_buffer) + Te[i] = plasticity.ip(Te[i], x_buffer) # Structural plasticity if 'sp' not in self.freeze: - Wee_t = plasticity.structural_plasticity(Wee_t) + Wee[i] = plasticity.structural_plasticity(Wee[i]) # iSTDP if 'istdp' not in self.freeze: - Wei_t = plasticity.istdp( + Wei[i] = plasticity.istdp( Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0) ) # 
diff --git a/dist/sorn-0.5.1-py3-none-any.whl b/dist/sorn-0.5.1-py3-none-any.whl
deleted file mode 100644
index 8c6d091..0000000
Binary files a/dist/sorn-0.5.1-py3-none-any.whl and /dev/null differ
diff --git a/dist/sorn-0.5.1.tar.gz b/dist/sorn-0.5.1.tar.gz
deleted file mode 100644
index dd7eb89..0000000
Binary files a/dist/sorn-0.5.1.tar.gz and /dev/null differ
diff --git a/dist/sorn-0.5.2-py3-none-any.whl b/dist/sorn-0.5.2-py3-none-any.whl
new file mode 100644
index 0000000..c0d84a4
Binary files /dev/null and b/dist/sorn-0.5.2-py3-none-any.whl differ
diff --git a/dist/sorn-0.5.2.tar.gz b/dist/sorn-0.5.2.tar.gz
new file mode 100644
index 0000000..85cbcc7
Binary files /dev/null and b/dist/sorn-0.5.2.tar.gz differ
diff --git a/setup.py b/setup.py
index c1b3d31..a79960d 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ def read(fname):
 
 setup(
     name="sorn",
-    version="0.5.1",
+    version="0.5.2",
     author="Saranraj Nambusubramaniyan",
     author_email="saran_nns@hotmail.com",
     description="Self-Organizing Recurrent Neural Networks",
@@ -31,6 +31,7 @@ def read(fname):
         "Programming Language :: Python :: 3.5",
         "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
     ],
     include_package_data=True,
     install_requires=["numpy", "configparser", "scipy", "seaborn", "networkx"],
diff --git a/sorn.egg-info/PKG-INFO b/sorn.egg-info/PKG-INFO
index 0a1d249..4c729bd 100644
--- a/sorn.egg-info/PKG-INFO
+++ b/sorn.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sorn
-Version: 0.5.1
+Version: 0.5.2
 Summary: Self-Organizing Recurrent Neural Networks
 Home-page: https://github.com/Saran-nns/sorn
 Author: Saranraj Nambusubramaniyan
@@ -273,4 +273,5 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.5
 Classifier: Programming Language :: Python :: 3.6
 Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
 Description-Content-Type: text/markdown
diff --git a/sorn/__init__.py b/sorn/__init__.py
index 5a225f7..6223714 100644
--- a/sorn/__init__.py
+++ b/sorn/__init__.py
@@ -3,6 +3,6 @@ from .utils import *
 
 __author__ = "Saranraj Nambusubramaniyan"
-__version__ = "0.5.1"
+__version__ = "0.5.2"
 
 logging.basicConfig(level=logging.INFO)
diff --git a/sorn/sorn.py b/sorn/sorn.py
index 67656ca..3d4edf7 100644
--- a/sorn/sorn.py
+++ b/sorn/sorn.py
@@ -287,7 +287,8 @@ def ip(self, te: np.array, x: np.array):
 
         return te_update
 
-    def ss(self, wee: np.array):
+    @staticmethod
+    def ss(wee: np.array):
         """Synaptic Scaling or Synaptic Normalization
 
         Args:
@@ -853,7 +854,7 @@ def simulate_sorn(
         Sorn._time_steps = time_steps
         self.phase = phase
         self.matrices = matrices
-        self.freeze = freeze
+        self.freeze = [] if freeze == None else freeze
 
         kwargs_ = [
             "_ne",
@@ -916,7 +917,6 @@ def simulate_sorn(
             te_buffer, ti_buffer = np.zeros(
                 (Sorn._ne, 1)), np.zeros((Sorn._ni, 1))
 
-            # Get the matrices and rename them for ease of reading
             Wee, Wei, Wie = (
                 matrix_collection.Wee,
                 matrix_collection.Wei,
@@ -956,31 +956,31 @@ def simulate_sorn(
                 # STDP
                 if 'stdp' not in self.freeze:
-                    Wee_t = plasticity.stdp(
+                    Wee[i] = plasticity.stdp(
                         Wee[i], x_buffer, cutoff_weights=(0.0, 1.0))
 
                 # Intrinsic plasticity
                 if 'ip' not in self.freeze:
-                    Te_t = plasticity.ip(Te[i], x_buffer)
+                    Te[i] = plasticity.ip(Te[i], x_buffer)
 
                 # Structural plasticity
-                if 'sp' not in freeze:
-                    Wee_t = plasticity.structural_plasticity(Wee_t)
+                if 'sp' not in self.freeze:
+                    Wee[i] = plasticity.structural_plasticity(Wee[i])
 
                 # iSTDP
-                if 'istdp' not in freeze:
-                    Wei_t = plasticity.istdp(
+                if 'istdp' not in self.freeze:
+                    Wei[i] = plasticity.istdp(
                         Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0)
                     )
 
                 # Synaptic scaling Wee
                 if 'ss' not in self.freeze:
-                    Wee_t = Plasticity().ss(Wee_t)
-                    Wei_t = Plasticity().ss(Wei_t)
+                    Wee[i] = plasticity.ss(Wee[i])
+                    Wei[i] = plasticity.ss(Wei[i])
 
 
             # Assign the matrices to the matrix collections
-            matrix_collection.weight_matrix(Wee_t, Wei_t, Wie[i], i)
-            matrix_collection.threshold_matrix(Te_t, Ti[i], i)
+            matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i)
+            matrix_collection.threshold_matrix(Te[i], Ti[i], i)
             matrix_collection.network_activity_t(x_buffer, y_buffer, i)
 
             X_all[i] = x_buffer[:, 1]
@@ -1099,8 +1099,8 @@ def train_sorn(
         self.time_steps = time_steps
         Sorn._time_steps = time_steps
         self.inputs = np.asarray(inputs)
-        self.freeze = freeze
-        # Collect the network activity at all time steps
+        self.freeze = [] if freeze == None else freeze
+
         X_all = [0] * self.time_steps
         Y_all = [0] * self.time_steps
         R_all = [0] * self.time_steps
@@ -1125,7 +1125,7 @@ def train_sorn(
 
             network_state = NetworkState(
                 self.inputs[:, i]
-            )  # Feed Input as an argument to the class
+            )
 
             # Buffers to get the resulting x and y vectors at the current time step and update the master matrix
             x_buffer, y_buffer = np.zeros(
@@ -1133,7 +1133,6 @@ def train_sorn(
             te_buffer, ti_buffer = np.zeros(
                 (Sorn._ne, 1)), np.zeros((Sorn._ni, 1))
 
-            # Get the matrices and rename them for ease of reading
             Wee, Wei, Wie = (
                 matrix_collection.Wee,
                 matrix_collection.Wei,
@@ -1170,35 +1169,36 @@ def train_sorn(
                 # STDP
                 if 'stdp' not in self.freeze:
-                    Wee_t = plasticity.stdp(
+                    Wee[i] = plasticity.stdp(
                         Wee[i], x_buffer, cutoff_weights=(0.0, 1.0))
 
                 # Intrinsic plasticity
                 if 'ip' not in self.freeze:
-                    Te_t = plasticity.ip(Te[i], x_buffer)
+                    Te[i] = plasticity.ip(Te[i], x_buffer)
 
                 # Structural plasticity
                 if 'sp' not in self.freeze:
-                    Wee_t = plasticity.structural_plasticity(Wee_t)
+                    Wee[i] = plasticity.structural_plasticity(Wee[i])
 
                 # iSTDP
                 if 'istdp' not in self.freeze:
-                    Wei_t = plasticity.istdp(
+                    Wei[i] = plasticity.istdp(
                         Wei[i], x_buffer, y_buffer, cutoff_weights=(0.0, 1.0)
                     )
 
                 # Synaptic scaling Wee
-                if 'sc' not in self.freeze:
-                    Wee_t = Plasticity().ss(Wee_t)
+                if 'ss' not in self.freeze:
+                    Wee[i] = plasticity.ss(Wee[i])
 
                     # Synaptic scaling Wei
-                    Wei_t = Plasticity().ss(Wei_t)
+                    Wei[i] = plasticity.ss(Wei[i])
 
             else:
-                Wee_t, Wei_t, Te_t = Wee[i], Wei[i], Te[i]
+                # Wee[i], Wei[i], Te[i] remain same
+                pass
 
             # Assign the matrices to the matrix collections
-            matrix_collection.weight_matrix(Wee_t, Wei_t, Wie[i], i)
-            matrix_collection.threshold_matrix(Te_t, Ti[i], i)
+            matrix_collection.weight_matrix(Wee[i], Wei[i], Wie[i], i)
+            matrix_collection.threshold_matrix(Te[i], Ti[i], i)
             matrix_collection.network_activity_t(x_buffer, y_buffer, i)
 
             X_all[i] = x_buffer[:, 1]
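Note: `Plasticity.ss` is converted to a `@staticmethod` above, so the simulation and training loops can call `plasticity.ss(...)` on the existing helper object instead of constructing a fresh `Plasticity()` on every time step. A self-contained sketch of that API shape; the normalization shown (rescaling each neuron's incoming weights to unit sum) is a generic stand-in and may differ from the package's actual `ss` implementation:

```python
import numpy as np

class Plasticity:
    @staticmethod
    def ss(wee: np.ndarray) -> np.ndarray:
        """Synaptic scaling sketch: rescale each column (one neuron's incoming
        excitatory weights) to sum to 1; all-zero columns are left untouched."""
        col_sums = wee.sum(axis=0, keepdims=True)
        return wee / np.where(col_sums == 0, 1.0, col_sums)

plasticity = Plasticity()
wee = np.random.rand(200, 200)
wee = plasticity.ss(wee)   # no new Plasticity() needed per call
assert np.allclose(wee.sum(axis=0), 1.0)
```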