superneuroabm 1.0.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- superneuroabm-1.0.0/LICENSE +28 -0
- superneuroabm-1.0.0/PKG-INFO +100 -0
- superneuroabm-1.0.0/README.md +61 -0
- superneuroabm-1.0.0/setup.cfg +4 -0
- superneuroabm-1.0.0/setup.py +48 -0
- superneuroabm-1.0.0/superneuroabm/__init__.py +3 -0
- superneuroabm-1.0.0/superneuroabm/component_base_config.yaml +129 -0
- superneuroabm-1.0.0/superneuroabm/io/__init__.py +3 -0
- superneuroabm-1.0.0/superneuroabm/io/nx.py +425 -0
- superneuroabm-1.0.0/superneuroabm/io/synthetic_networks.py +770 -0
- superneuroabm-1.0.0/superneuroabm/model.py +689 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/soma/izh.py +86 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/soma/lif.py +98 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/soma/lif_soma_adaptive_thr.py +111 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/single_exp.py +71 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/stdp/Low_resolution_synapse.py +117 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/stdp/Three-bit_exp_pair_wise.py +130 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/stdp/Three_bit_exp_pair_wise.py +133 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/stdp/exp_pair_wise_stdp.py +119 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/stdp/learning_rule_selector.py +72 -0
- superneuroabm-1.0.0/superneuroabm/step_functions/synapse/util.py +49 -0
- superneuroabm-1.0.0/superneuroabm/util.py +38 -0
- superneuroabm-1.0.0/superneuroabm.egg-info/PKG-INFO +100 -0
- superneuroabm-1.0.0/superneuroabm.egg-info/SOURCES.txt +30 -0
- superneuroabm-1.0.0/superneuroabm.egg-info/dependency_links.txt +1 -0
- superneuroabm-1.0.0/superneuroabm.egg-info/requires.txt +4 -0
- superneuroabm-1.0.0/superneuroabm.egg-info/top_level.txt +1 -0
- superneuroabm-1.0.0/tests/test_internal_state_tracking.py +200 -0
- superneuroabm-1.0.0/tests/test_lif_mixed_synapses_stdp_mpi.py +164 -0
- superneuroabm-1.0.0/tests/test_model_reset_stdp.py +363 -0
- superneuroabm-1.0.0/tests/test_mpi_comparison.py +271 -0
- superneuroabm-1.0.0/tests/test_synapse_and_soma_models.py +482 -0
--- /dev/null
+++ superneuroabm-1.0.0/LICENSE
@@ -0,0 +1,28 @@
+BSD 3-Clause License
+
+Copyright (c) 2023, Oak Ridge National Laboratory
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+++ superneuroabm-1.0.0/PKG-INFO
@@ -0,0 +1,100 @@
+Metadata-Version: 2.4
+Name: superneuroabm
+Version: 1.0.0
+Summary: A GPU-based multi-agent simulation framework for neuromorphic computing.
+Home-page: https://github.com/ORNL/superneuroabm
+Author: Chathika Gunaratne, Shruti Kulkarni, Ashish Gautam, Xi Zhang, Prasanna Date
+Author-email: gunaratnecs@ornl.gov
+License: BSD-3-Clause
+Project-URL: Source, https://github.com/ORNL/superneuroabm
+Project-URL: Bug Tracker, https://github.com/ORNL/superneuroabm/issues
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: sagesim==0.5.0
+Requires-Dist: pyyaml
+Requires-Dist: networkx
+Requires-Dist: matplotlib
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: license-file
+Dynamic: project-url
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# SuperNeuroABM
+
+**SuperNeuroABM** is a GPU-based multi-agent simulation framework for neuromorphic computing. Built on top of [SAGESim](https://github.com/ORNL/SAGESim), it enables fast and scalable simulation of spiking neural networks on both NVIDIA and AMD GPUs.
+
+## Key Features
+
+- **GPU Acceleration**: Leverages CUDA (NVIDIA) or ROCm (AMD) for high-performance simulation
+- **Scalable**: From single GPU to multi-GPU HPC clusters via MPI
+- **Flexible Neuron Models**: Support for various soma and synapse step functions
+- **STDP Support**: Built-in spike-timing-dependent plasticity mechanisms
+- **Network I/O**: Import/export neural network topologies
+
+## Requirements
+
+- Python 3.11+
+- NVIDIA GPU with CUDA drivers **or** AMD GPU with ROCm 5.7.1+
+- MPI implementation (OpenMPI, MPICH, etc.) for multi-GPU execution
+
+## Installation
+
+Your system might require specific steps to install `mpi4py` and/or `cupy` depending on your hardware. In that case, use your system's recommended instructions to install these dependencies first.
+
+```bash
+pip install superneuroabm
+```
+
+## Quick Start
+
+```python
+from superneuroabm.model import SuperNeuroModel
+
+# Create model
+model = SuperNeuroModel()
+
+# Create neurons
+n1 = model.create_neuron()
+n2 = model.create_neuron()
+
+# Connect with synapse
+model.create_synapse(n1, n2, weight=1.0)
+
+# Setup and run
+model.setup(use_gpu=True)
+model.simulate(ticks=100)
+```
+
+## Unit Tests
+
+To run unit tests:
+
+```bash
+python -m unittest tests.test_synapse_and_soma_models
+```
+
+## Publications
+
+[Date, Prasanna, Chathika Gunaratne, Shruti R. Kulkarni, Robert Patton, Mark Coletti, and Thomas Potok. "SuperNeuro: A fast and scalable simulator for neuromorphic computing." In Proceedings of the 2023 International Conference on Neuromorphic Systems, pp. 1-4. 2023.](https://dl.acm.org/doi/abs/10.1145/3589737.3606000)
+
+## License
+
+BSD-3-Clause License - Oak Ridge National Laboratory
--- /dev/null
+++ superneuroabm-1.0.0/README.md
@@ -0,0 +1,61 @@
+# SuperNeuroABM
+
+**SuperNeuroABM** is a GPU-based multi-agent simulation framework for neuromorphic computing. Built on top of [SAGESim](https://github.com/ORNL/SAGESim), it enables fast and scalable simulation of spiking neural networks on both NVIDIA and AMD GPUs.
+
+## Key Features
+
+- **GPU Acceleration**: Leverages CUDA (NVIDIA) or ROCm (AMD) for high-performance simulation
+- **Scalable**: From single GPU to multi-GPU HPC clusters via MPI
+- **Flexible Neuron Models**: Support for various soma and synapse step functions
+- **STDP Support**: Built-in spike-timing-dependent plasticity mechanisms
+- **Network I/O**: Import/export neural network topologies
+
+## Requirements
+
+- Python 3.11+
+- NVIDIA GPU with CUDA drivers **or** AMD GPU with ROCm 5.7.1+
+- MPI implementation (OpenMPI, MPICH, etc.) for multi-GPU execution
+
+## Installation
+
+Your system might require specific steps to install `mpi4py` and/or `cupy` depending on your hardware. In that case, use your system's recommended instructions to install these dependencies first.
+
+```bash
+pip install superneuroabm
+```
+
+## Quick Start
+
+```python
+from superneuroabm.model import SuperNeuroModel
+
+# Create model
+model = SuperNeuroModel()
+
+# Create neurons
+n1 = model.create_neuron()
+n2 = model.create_neuron()
+
+# Connect with synapse
+model.create_synapse(n1, n2, weight=1.0)
+
+# Setup and run
+model.setup(use_gpu=True)
+model.simulate(ticks=100)
+```
+
+## Unit Tests
+
+To run unit tests:
+
+```bash
+python -m unittest tests.test_synapse_and_soma_models
+```
+
+## Publications
+
+[Date, Prasanna, Chathika Gunaratne, Shruti R. Kulkarni, Robert Patton, Mark Coletti, and Thomas Potok. "SuperNeuro: A fast and scalable simulator for neuromorphic computing." In Proceedings of the 2023 International Conference on Neuromorphic Systems, pp. 1-4. 2023.](https://dl.acm.org/doi/abs/10.1145/3589737.3606000)
+
+## License
+
+BSD-3-Clause License - Oak Ridge National Laboratory
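The installation note in the README above leaves the `mpi4py`/`cupy` step deliberately hardware-specific. As a rough sketch only (the CuPy wheel name assumes a CUDA 12.x machine with a working system MPI; it is not prescribed by the README, and AMD/ROCm systems need a ROCm build of CuPy instead), the pre-install and a multi-GPU launch might look like:

```bash
# Illustrative pre-install for a CUDA 12.x machine with a system MPI available.
pip install cupy-cuda12x   # pick the CuPy build matching your CUDA/ROCm toolkit
pip install mpi4py         # compiles against the system MPI (OpenMPI, MPICH, ...)
pip install superneuroabm

# Multi-GPU runs are launched through MPI; the script name is a placeholder.
mpirun -n 2 python run_simulation.py
```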
--- /dev/null
+++ superneuroabm-1.0.0/setup.py
@@ -0,0 +1,48 @@
+from setuptools import setup, find_packages
+from pathlib import Path
+
+# Read README for long description
+this_directory = Path(__file__).parent
+long_description = (this_directory / "README.md").read_text()
+
+setup(
+    name="superneuroabm",
+    version="1.0.0",
+    author="Chathika Gunaratne, Shruti Kulkarni, Ashish Gautam, Xi Zhang, Prasanna Date",
+    author_email="gunaratnecs@ornl.gov",
+    packages=[
+        "superneuroabm",
+        "superneuroabm.step_functions",
+        "superneuroabm.step_functions.soma",
+        "superneuroabm.step_functions.synapse",
+        "superneuroabm.step_functions.synapse.stdp",
+        "superneuroabm.io",
+    ],
+    package_data={
+        "superneuroabm": ["*.yaml"],
+    },
+    include_package_data=True,
+    url="https://github.com/ORNL/superneuroabm",
+    license="BSD-3-Clause",
+    description="A GPU-based multi-agent simulation framework for neuromorphic computing.",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    project_urls={
+        "Source": "https://github.com/ORNL/superneuroabm",
+        "Bug Tracker": "https://github.com/ORNL/superneuroabm/issues",
+    },
+    install_requires=["sagesim==0.5.0", "pyyaml", "networkx", "matplotlib"],
+    python_requires=">=3.11",
+    classifiers=[
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Science/Research",
+        "License :: OSI Approved :: BSD License",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
+        "Programming Language :: Python :: 3.13",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    ],
+)
+
--- /dev/null
+++ superneuroabm-1.0.0/superneuroabm/component_base_config.yaml
@@ -0,0 +1,129 @@
+soma:
+  lif_soma:
+    config_0:
+      hyperparameters:
+        C: 10e-9 # Membrane capacitance in Farads (10 nF)
+        R: 1e6 # Membrane resistance in Ohms (1 MΩ)
+        vthr: -45 # Spike threshold voltage (mV)
+        tref: 5e-3 # Refractory period (5 ms)
+        vrest: -60 # Resting potential (mV)
+        vreset: -60 # Reset potential after spike (mV)
+        tref_allows_integration: 1 # Whether to allow integration during refractory period
+        I_in: 0 # Input current (A)
+        scaling_factor: 1e-5 # Scaling factor for synaptic current
+      internal_state:
+        v: -60.0 # Initial membrane voltage
+        tcount: 0.0 # Time counter
+        tlast: 0.0 # Last spike time
+  izh_soma:
+    config_0: # intrinsic bursting
+      hyperparameters:
+        k: 1.2
+        vthr: -45
+        C: 150
+        a: 0.01
+        b: 5
+        vpeak: 50
+        vrest: -75
+        d: 130
+        vreset: -56
+        I_in: 420
+      internal_state:
+        v: -75 # Initial membrane voltage
+        u: 0 # Initial recovery variable
+
+    config_1: # regular spiking
+      hyperparameters:
+        k: 0.7
+        vthr: -40
+        C: 100
+        a: 0.03
+        b: -2
+        vpeak: 35
+        vrest: -60
+        d: 100
+        vreset: -50
+        I_in: 100
+      internal_state:
+        v: -60 # Initial membrane voltage
+        u: 0 # Initial recovery variable
+
+  lif_soma_adaptive_thr:
+    config_0:
+      hyperparameters:
+        C: 10e-9 # Membrane capacitance in Farads (10 nF)
+        R: 1e6 # Membrane resistance in Ohms (1 MΩ)
+        vthr_initial: -45 # Spike threshold voltage (mV)
+        tref: 5e-3 # Refractory period (5 ms)
+        vrest: -60 # Resting potential (mV)
+        vreset: -60 # Reset potential after spike (mV)
+        tref_allows_integration: 1 # Whether to allow integration during refractory period
+        I_in: 0 # Input current (A)
+        scaling_factor: 1e-5 # Scaling factor for synaptic current
+        delta_thr: 1.0 # Threshold increase after spike (mV)
+        tau_decay_thr: 30e-3 # Time constant for threshold decay (30 ms)
+      internal_state:
+        v: -60.0 # Initial membrane voltage
+        tcount: 0.0 # Time counter
+        tlast: 0.0 # Last spike time
+        vthr: -45.0 # Initial spike threshold voltage
+
+
+synapse:
+  single_exp_synapse:
+    no_learning_config_0:
+      hyperparameters:
+        weight: 14.0 # Synaptic weight (strength)
+        synaptic_delay: 1.0 # Transmission delay (ms)
+        scale: 1.0 # Scaling factor
+        tau_fall: 1e-2 # Decay time constant (10 ms)
+        tau_rise: 0 # Rise time constant (instantaneous)
+      internal_state:
+        I_synapse: 0.0 # Initial synaptic current
+      learning_hyperparameters:
+        stdp_type: -1 # No learning
+    exp_pair_wise_stdp_config_0:
+      hyperparameters:
+        weight: 14.0 # Synaptic weight (strength)
+        synaptic_delay: 1.0 # Transmission delay (ms)
+        scale: 1.0 # Scaling factor
+        tau_fall: 1e-2 # Decay time constant (10 ms)
+        tau_rise: 0 # Rise time constant (instantaneous)
+      internal_state:
+        I_synapse: 0.0 # Initial synaptic current
+      learning_hyperparameters:
+        stdp_type: 0.0 # Exp pair-wise STDP
+
+        tau_pre_stdp: 10e-3 # Pre-synaptic STDP time constant (10 ms)
+        tau_post_stdp: 10e-3 # Post-synaptic STDP time constant (10 ms)
+        a_exp_pre: 0.005 # Pre-synaptic STDP learning rate
+        a_exp_post: 0.005 # Post-synaptic STDP learning rate
+        stdp_history_length: 100 # Length of STDP history buffer
+      internal_learning_state:
+        pre_trace: 0 # Pre-synaptic trace
+        post_trace: 0 # Post-synaptic trace
+        dW: 0 # Weight change accumulator
+
+    three_bit_exp_pair_wise_stdp_config_0:
+      hyperparameters:
+        weight: 14.0 # Synaptic weight (strength)
+        synaptic_delay: 1.0 # Transmission delay (ms)
+        scale: 1.0 # Scaling factor
+        tau_fall: 1e-2 # Decay time constant (10 ms)
+        tau_rise: 0 # Rise time constant (instantaneous)
+      internal_state:
+        I_synapse: 0.0 # Initial synaptic current
+      learning_hyperparameters:
+        stdp_type: 1.0 # Three-bit exp pair-wise STDP
+        tau_pre_stdp: 10e-3 # Pre-synaptic STDP time constant (10 ms)
+        tau_post_stdp: 10e-3 # Post-synaptic STDP time constant (10 ms)
+        a_exp_pre: 0.005 # Pre-synaptic STDP learning rate
+        a_exp_post: 0.005 # Post-synaptic STDP learning rate
+        stdp_history_length: 100 # Length of STDP history buffer
+        wmin: 0.0 # Minimum synaptic weight
+        wmax: 24.0 # Maximum synaptic weight
+        num_levels: 8 # Number of quantization levels (3 bits)
+      internal_learning_state:
+        pre_trace: 0 # Pre-synaptic trace
+        post_trace: 0 # Post-synaptic trace
+        dW: 0 # Weight change accumulator