superneuroabm 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
superneuroabm/step_functions/synapse/stdp/Three_bit_exp_pair_wise.py ADDED
@@ -0,0 +1,133 @@
+ """
+ Quantized exponential pair-wise STDP (Spike-Timing Dependent Plasticity)
+ step function for spiking neural networks.
+ """
+
+ import cupy as cp
+ from cupyx import jit
+
+ from superneuroabm.step_functions.synapse.util import get_soma_spike
+
+
+ @jit.rawkernel(device="cuda")
+ def exp_pair_wise_stdp_quantized(
+     tick,
+     agent_index,
+     globals,
+     agent_ids,
+     breeds,
+     locations,
+     synapse_params,  # weight, synaptic delay
+     learning_params,
+     internal_state,
+     internal_learning_state,  # learning state variables
+     synapse_history,  # delay
+     input_spikes_tensor,  # input spikes
+     output_spikes_tensor,
+     internal_states_buffer,
+     internal_learning_states_buffer,
+ ):
+     t_current = int(tick)
+
+     dt = globals[0]  # time step size
+
+     # Get the synapse parameters:
+     weight = synapse_params[agent_index][0]
+     synaptic_delay = synapse_params[agent_index][1]
+
+     # Get the learning parameters:
+     # (index 0 is stdpType, parsed in the learning rule selector)
+     tau_pre_stdp = learning_params[agent_index][1]
+     tau_post_stdp = learning_params[agent_index][2]
+     a_exp_pre = learning_params[agent_index][3]
+     a_exp_post = learning_params[agent_index][4]
+     stdp_history_length = learning_params[agent_index][5]
+     # Wmin, Wmax, and the number of quantization levels follow at indices 6-8.
+
+     pre_trace = internal_learning_state[agent_index][0]
+     post_trace = internal_learning_state[agent_index][1]
+     dW = internal_learning_state[agent_index][2]
+
+     # locations[agent_index] = [pre_soma_index, post_soma_index];
+     # SAGESim has already converted agent IDs to local indices.
+     pre_soma_index = locations[agent_index][0]
+     post_soma_index = locations[agent_index][1]
+
+     # Get the pre-soma spike
+     pre_soma_spike = get_soma_spike(
+         tick,
+         agent_index,
+         globals,
+         agent_ids,
+         pre_soma_index,
+         t_current,
+         input_spikes_tensor,
+         output_spikes_tensor,
+     )
+
+     post_soma_spike = get_soma_spike(
+         tick,
+         agent_index,
+         globals,
+         agent_ids,
+         post_soma_index,
+         t_current,
+         input_spikes_tensor,
+         output_spikes_tensor,
+     )
+
+     # Update the exponentially decaying pre/post traces (explicit Euler step)
+     pre_trace = pre_trace * (1 - dt / tau_pre_stdp) + pre_soma_spike * a_exp_pre
+     post_trace = post_trace * (1 - dt / tau_post_stdp) + post_soma_spike * a_exp_post
+     dW = pre_trace * post_soma_spike - post_trace * pre_soma_spike
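+     # Sign convention: a post spike while the pre-trace is high (pre fired
+     # recently, a causal pairing) potentiates; a pre spike while the
+     # post-trace is high (an anti-causal pairing) depresses.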
+
+     weight += dW  # Update the weight
+
+     # === 3-bit quantization ===
+     wmin = learning_params[agent_index][6]  # e.g. 0.0
+     wmax = learning_params[agent_index][7]  # e.g. 14.0
+     num_levels = learning_params[agent_index][8]  # e.g. 8 levels for 3 bits
+     delta = (wmax - wmin) / (num_levels - 1)
+     # Clamp to [wmin, wmax]:
+     weight = weight if weight <= wmax else wmax
+     weight = weight if weight >= wmin else wmin
+     # Snap to the nearest of the num_levels evenly spaced values:
+     quantized_weight = cp.rint((weight - wmin) / delta) * delta + wmin
+     weight = quantized_weight
+     # ==========================
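+     # For example, with wmin=0.0, wmax=14.0 and num_levels=8 (the inline
+     # defaults above), delta=2.0, so weights snap to {0, 2, 4, ..., 14};
+     # e.g. 5.3 rounds to 6.0 and anything above 14.0 is clamped first.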
+
+     synapse_params[agent_index][0] = weight  # Store the quantized weight
+
+     internal_learning_state[agent_index][0] = pre_trace
+     internal_learning_state[agent_index][1] = post_trace
+     internal_learning_state[agent_index][2] = dW
+
+     # Safe buffer indexing: use modulo to prevent out-of-bounds access.
+     # When tracking is disabled, the buffer length is 1, so t_current % 1 == 0 always.
+     buffer_idx = t_current % len(internal_learning_states_buffer[agent_index])
+     internal_learning_states_buffer[agent_index][buffer_idx][0] = pre_trace
+     internal_learning_states_buffer[agent_index][buffer_idx][1] = post_trace
+     internal_learning_states_buffer[agent_index][buffer_idx][2] = dW
+
+     # Batched variant (not active): accumulate spikes and traces over a window
+     # of stdp_history_length ticks, then apply the update in one step.
+     # spike_pre_[t_current] = pre_soma_spike  # spike_pre_ has shape (stdp_history_length, n_inputs); pre_soma_spike is (n_inputs,)
+     # spike_post_[:, t_current] = post_soma_spike  # spike_post_ has shape (n_outputs, stdp_history_length)
+     # trace_pre_[t_current] = pre_trace  # corresponding traces, shape (stdp_history_length, n_inputs)
+     # trace_post_[:, t_current] = post_trace  # corresponding traces, shape (n_outputs, stdp_history_length)
+     # if t_current == stdp_history_length:
+     #     dW = cp.dot(spike_post_, trace_pre_)  # may need a learning rate; multiply by (wmax - W) for multiplicative STDP
+     #     dW -= cp.dot(trace_post_, spike_pre_)  # add learning rate * W for multiplicative STDP
+     #     clipped_dW = cp.clip(dW / stdp_history_length, dw_min, dw_max)  # clip the weight change if needed
+     #     weight = cp.clip(weight + clipped_dW, wmin, wmax)  # update the weight
+     #     # Reset the trace and spike buffers:
+     #     spike_pre_ = cp.zeros((stdp_history_length, number_of_input_neurons), dtype=cp.float32)
+     #     spike_post_ = cp.zeros((number_of_output_neurons, stdp_history_length), dtype=cp.float32)
+     #     trace_pre_ = cp.zeros((stdp_history_length, number_of_input_neurons), dtype=cp.float32)
+     #     trace_post_ = cp.zeros((number_of_output_neurons, stdp_history_length), dtype=cp.float32)
+
+     internal_state[agent_index][2] = pre_trace
+     internal_state[agent_index][3] = post_trace
+
+     # Safe buffer indexing for internal_states_buffer (same modulo scheme as above)
+     state_buffer_idx = t_current % len(internal_states_buffer[agent_index])
+     internal_states_buffer[agent_index][state_buffer_idx][2] = post_soma_spike
+     internal_states_buffer[agent_index][state_buffer_idx][3] = post_trace
superneuroabm/step_functions/synapse/stdp/exp_pair_wise_stdp.py ADDED
@@ -0,0 +1,119 @@
+ """
+ Exponential pair-wise STDP (Spike-Timing Dependent Plasticity) step function
+ for spiking neural networks.
+ """
+
+ import cupy as cp
+ from cupyx import jit
+
+ from superneuroabm.step_functions.synapse.util import get_soma_spike
+
+
+ @jit.rawkernel(device="cuda")
+ def exp_pair_wise_stdp(
+     tick,
+     agent_index,
+     globals,
+     agent_ids,
+     breeds,
+     locations,
+     synapse_params,  # weight, synaptic delay
+     learning_params,
+     internal_state,
+     internal_learning_state,  # learning state variables
+     synapse_history,  # delay
+     input_spikes_tensor,  # input spikes
+     output_spikes_tensor,
+     internal_states_buffer,
+     internal_learning_states_buffer,
+ ):
+     t_current = int(tick)
+
+     dt = globals[0]  # time step size
+
+     # Get the synapse parameters:
+     weight = synapse_params[agent_index][0]
+     synaptic_delay = synapse_params[agent_index][1]
+
+     # Get the learning parameters:
+     # (index 0 is stdpType, parsed in the learning rule selector)
+     tau_pre_stdp = learning_params[agent_index][1]
+     tau_post_stdp = learning_params[agent_index][2]
+     a_exp_pre = learning_params[agent_index][3]
+     a_exp_post = learning_params[agent_index][4]
+     stdp_history_length = learning_params[agent_index][5]
+     # Wmin, Wmax (reserved; not used by this rule)
+
+     pre_trace = internal_learning_state[agent_index][0]
+     post_trace = internal_learning_state[agent_index][1]
+     dW = internal_learning_state[agent_index][2]
+
+     # locations[agent_index] = [pre_soma_index, post_soma_index];
+     # SAGESim has already converted agent IDs to local indices.
+     pre_soma_index = locations[agent_index][0]
+     post_soma_index = locations[agent_index][1]
+
+     # Get the pre-soma spike
+     pre_soma_spike = get_soma_spike(
+         tick,
+         agent_index,
+         globals,
+         agent_ids,
+         pre_soma_index,
+         t_current,
+         input_spikes_tensor,
+         output_spikes_tensor,
+     )
+
+     post_soma_spike = get_soma_spike(
+         tick,
+         agent_index,
+         globals,
+         agent_ids,
+         post_soma_index,
+         t_current,
+         input_spikes_tensor,
+         output_spikes_tensor,
+     )
+
+     pre_trace = pre_trace * (1 - dt / tau_pre_stdp) + pre_soma_spike * a_exp_pre
+     post_trace = post_trace * (1 - dt / tau_post_stdp) + post_soma_spike * a_exp_post
+     dW = pre_trace * post_soma_spike - post_trace * pre_soma_spike
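+     # Each trace decays exponentially (x <- x * (1 - dt/tau), the explicit
+     # Euler form of dx/dt = -x/tau) and jumps by a_exp_pre / a_exp_post on a
+     # spike; dW then pairs each spike with the opposite side's trace.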
+
+     weight += dW  # Update the weight
+     synapse_params[agent_index][0] = weight  # Update the weight in synapse_params
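+     # Note: unlike the quantized variant, no Wmin/Wmax clamp is applied here,
+     # so the weight is unbounded.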
+
+     internal_learning_state[agent_index][0] = pre_trace
+     internal_learning_state[agent_index][1] = post_trace
+     internal_learning_state[agent_index][2] = dW
+
+     # Safe buffer indexing: use modulo to prevent out-of-bounds access.
+     # When tracking is disabled, the buffer length is 1, so t_current % 1 == 0 always.
+     buffer_idx = t_current % len(internal_learning_states_buffer[agent_index])
+     internal_learning_states_buffer[agent_index][buffer_idx][0] = pre_trace
+     internal_learning_states_buffer[agent_index][buffer_idx][1] = post_trace
+     internal_learning_states_buffer[agent_index][buffer_idx][2] = dW
+
+     # Batched variant (not active): accumulate spikes and traces over a window
+     # of stdp_history_length ticks, then apply the update in one step.
+     # spike_pre_[t_current] = pre_soma_spike  # spike_pre_ has shape (stdp_history_length, n_inputs); pre_soma_spike is (n_inputs,)
+     # spike_post_[:, t_current] = post_soma_spike  # spike_post_ has shape (n_outputs, stdp_history_length)
+     # trace_pre_[t_current] = pre_trace  # corresponding traces, shape (stdp_history_length, n_inputs)
+     # trace_post_[:, t_current] = post_trace  # corresponding traces, shape (n_outputs, stdp_history_length)
+     # if t_current == stdp_history_length:
+     #     dW = cp.dot(spike_post_, trace_pre_)  # may need a learning rate; multiply by (wmax - W) for multiplicative STDP
+     #     dW -= cp.dot(trace_post_, spike_pre_)  # add learning rate * W for multiplicative STDP
+     #     clipped_dW = cp.clip(dW / stdp_history_length, dw_min, dw_max)  # clip the weight change if needed
+     #     weight = cp.clip(weight + clipped_dW, wmin, wmax)  # update the weight
+     #     # Reset the trace and spike buffers:
+     #     spike_pre_ = cp.zeros((stdp_history_length, number_of_input_neurons), dtype=cp.float32)
+     #     spike_post_ = cp.zeros((number_of_output_neurons, stdp_history_length), dtype=cp.float32)
+     #     trace_pre_ = cp.zeros((stdp_history_length, number_of_input_neurons), dtype=cp.float32)
+     #     trace_post_ = cp.zeros((number_of_output_neurons, stdp_history_length), dtype=cp.float32)
+
+     internal_state[agent_index][2] = pre_trace
+     internal_state[agent_index][3] = post_trace
+
+     # Safe buffer indexing for internal_states_buffer (same modulo scheme as above)
+     state_buffer_idx = t_current % len(internal_states_buffer[agent_index])
+     internal_states_buffer[agent_index][state_buffer_idx][2] = post_soma_spike
+     internal_states_buffer[agent_index][state_buffer_idx][3] = post_trace
superneuroabm/step_functions/synapse/stdp/learning_rule_selector.py ADDED
@@ -0,0 +1,72 @@
+ from cupyx import jit
+
+ from superneuroabm.step_functions.synapse.util import get_soma_spike
+ from superneuroabm.step_functions.synapse.stdp.exp_pair_wise_stdp import (
+     exp_pair_wise_stdp,
+ )
+ from superneuroabm.step_functions.synapse.stdp.Three_bit_exp_pair_wise import (
+     exp_pair_wise_stdp_quantized,
+ )
+
+
+ @jit.rawkernel(device="cuda")
+ def learning_rule_selector(
+     tick,
+     agent_index,
+     globals,
+     agent_ids,
+     breeds,
+     locations,
+     synapse_params,
+     learning_params,  # index 0 selects the STDP rule
+     internal_state,
+     internal_learning_state,
+     synapse_history,  # delay
+     input_spikes_tensor,  # input spikes
+     output_spikes_tensor,
+     internal_states_buffer,
+     internal_learning_states_buffer,
+ ):
+
+     # -1: no learning, 0: exp_pair_wise_stdp, 1: exp_pair_wise_stdp_quantized
+     stdpType = learning_params[agent_index][0]
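+     # Device code in cupyx.jit resolves calls statically, so the rule is
+     # dispatched with a flat if/elif chain rather than a function table.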
+     if stdpType == -1:
+         pass
+     elif stdpType == 0:
+         exp_pair_wise_stdp(
+             tick,
+             agent_index,
+             globals,
+             agent_ids,
+             breeds,
+             locations,
+             synapse_params,
+             learning_params,
+             internal_state,
+             internal_learning_state,
+             synapse_history,
+             input_spikes_tensor,
+             output_spikes_tensor,
+             internal_states_buffer,
+             internal_learning_states_buffer,
+         )
+     elif stdpType == 1:
+         exp_pair_wise_stdp_quantized(
+             tick,
+             agent_index,
+             globals,
+             agent_ids,
+             breeds,
+             locations,
+             synapse_params,
+             learning_params,
+             internal_state,
+             internal_learning_state,
+             synapse_history,
+             input_spikes_tensor,
+             output_spikes_tensor,
+             internal_states_buffer,
+             internal_learning_states_buffer,
+         )
superneuroabm/step_functions/synapse/util.py ADDED
@@ -0,0 +1,49 @@
+ import cupy as cp
+ from cupyx import jit
+
+
+ @jit.rawkernel(device="cuda")
+ def get_soma_spike(
+     tick,
+     agent_index,
+     globals,
+     agent_ids,
+     pre_soma_index,  # used to be pre_soma_id; now it is already an index
+     t_current,
+     input_spikes_tensor,  # input spikes
+     output_spikes_tensor,
+ ):
+     """
+     Get the spike from a pre-soma using its local index (already converted by SAGESim).
+
+     Args:
+         pre_soma_index: Local index of the pre-synaptic soma (-1 for external input)
+         globals, agent_ids: Kept for signature compatibility; no longer used
+
+     NOTE: Due to double buffering, this reads from the PREVIOUS tick's spikes.
+     Somas write spikes at priority 0 and synapses read at priority 1, but the
+     write buffer is not copied to the read buffer until all priorities complete.
+     This introduces a one-tick synaptic delay, which is biologically realistic.
+     """
+     t_current = int(tick)
+
+     if pre_soma_index >= 0:
+         # pre_soma_index is already a local index (no search needed).
+         # At tick 0 there are no previous spikes, so the spike is 0.
+         if t_current > 0:
+             spike = output_spikes_tensor[pre_soma_index][t_current - 1]
+         else:
+             spike = 0.0
+     else:
+         spike = 0.0
+         spike_buffer_max_len = len(input_spikes_tensor[agent_index])
+         i = 0
+
+         # input_spikes_tensor is flattened as [tick, value, tick, value, ...].
+         # Check i + 1 < max_len to avoid reading past the array bounds.
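+         # Example encoding: external spikes at ticks 3 and 7 with values 1.0
+         # and 0.5 are stored as [3.0, 1.0, 7.0, 0.5, nan, nan, ...], so the
+         # scan below stops at the first nan tick entry.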
+         while i + 1 < spike_buffer_max_len and not cp.isnan(input_spikes_tensor[agent_index][i]):
+             if input_spikes_tensor[agent_index][i] == t_current:  # tick at even index
+                 spike += input_spikes_tensor[agent_index][i + 1]  # value at odd index
+             i += 2  # advance by 2 to the next (tick, value) pair
+     return spike
superneuroabm/util.py ADDED
@@ -0,0 +1,38 @@
+ """
+ SuperNeuroABM utilities
+ """
+
+ from pathlib import Path
+
+ import yaml
+
+
+ current_dir = Path(__file__).parent
+ base_config_fpath = current_dir / "component_base_config.yaml"
+
+
+ def load_component_configurations(config_file: str | Path = base_config_fpath) -> dict:
+     """
+     Load component configurations from a YAML file.
+
+     The file is nested as component class -> breed -> configuration name ->
+     value type -> {key: value}; all leaf values are coerced to float.
+
+     Args:
+         config_file: Path to the YAML configuration file.
+
+     Returns:
+         A dictionary containing the component configurations.
+     """
+     with open(config_file, "r", encoding="utf-8") as f:
+         configurations = yaml.safe_load(f)
+     # Make sure all leaf values are floats
+     for component_class in configurations:
+         for breed in configurations[component_class]:
+             for config_name in configurations[component_class][breed]:
+                 for value_type in configurations[component_class][breed][config_name]:
+                     values = configurations[component_class][breed][config_name][value_type]
+                     for key, value in values.items():
+                         values[key] = float(value)
+     return configurations
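+
+
+ # Example of the expected nesting (hypothetical YAML; names are illustrative):
+ #
+ #   Soma:                 # component class
+ #     lif:                # breed
+ #       default:          # configuration name
+ #         internal_state: # value type
+ #           v_mem: 0      # leaf values are coerced to float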
superneuroabm-1.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,100 @@
+ Metadata-Version: 2.4
+ Name: superneuroabm
+ Version: 1.0.0
+ Summary: A GPU-based multi-agent simulation framework for neuromorphic computing.
+ Home-page: https://github.com/ORNL/superneuroabm
+ Author: Chathika Gunaratne, Shruti Kulkarni, Ashish Gautam, Xi Zhang, Prasanna Date
+ Author-email: gunaratnecs@ornl.gov
+ License: BSD-3-Clause
+ Project-URL: Source, https://github.com/ORNL/superneuroabm
+ Project-URL: Bug Tracker, https://github.com/ORNL/superneuroabm/issues
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: sagesim==0.5.0
+ Requires-Dist: pyyaml
+ Requires-Dist: networkx
+ Requires-Dist: matplotlib
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: project-url
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # SuperNeuroABM
+
+ **SuperNeuroABM** is a GPU-based multi-agent simulation framework for neuromorphic computing. Built on top of [SAGESim](https://github.com/ORNL/SAGESim), it enables fast and scalable simulation of spiking neural networks on both NVIDIA and AMD GPUs.
+
+ ## Key Features
+
+ - **GPU Acceleration**: Leverages CUDA (NVIDIA) or ROCm (AMD) for high-performance simulation
+ - **Scalable**: From a single GPU to multi-GPU HPC clusters via MPI
+ - **Flexible Neuron Models**: Support for various soma and synapse step functions
+ - **STDP Support**: Built-in spike-timing-dependent plasticity mechanisms
+ - **Network I/O**: Import/export of neural network topologies
+
+ ## Requirements
+
+ - Python 3.11+
+ - NVIDIA GPU with CUDA drivers **or** AMD GPU with ROCm 5.7.1+
+ - MPI implementation (OpenMPI, MPICH, etc.) for multi-GPU execution
+
+ ## Installation
+
+ Depending on your hardware, `mpi4py` and/or `cupy` may require system-specific installation steps; if so, install those dependencies first using your platform's recommended instructions.
+
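+ For example, on an NVIDIA system with CUDA 12 you might first run (wheel
+ names as published by the CuPy and mpi4py projects; adjust for your setup):
+
+ ```bash
+ pip install cupy-cuda12x mpi4py
+ ```
+
+ Then install the package:
+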
+ ```bash
+ pip install superneuroabm
+ ```
+
+ ## Quick Start
+
+ ```python
+ from superneuroabm.model import SuperNeuroModel
+
+ # Create model
+ model = SuperNeuroModel()
+
+ # Create neurons
+ n1 = model.create_neuron()
+ n2 = model.create_neuron()
+
+ # Connect with synapse
+ model.create_synapse(n1, n2, weight=1.0)
+
+ # Setup and run
+ model.setup(use_gpu=True)
+ model.simulate(ticks=100)
+ ```
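+
+ Component defaults (soma and synapse parameters) ship in
+ `component_base_config.yaml` and can be inspected with the packaged helper
+ (a minimal sketch; it simply loads the bundled YAML):
+
+ ```python
+ from superneuroabm.util import load_component_configurations
+
+ # Loads the bundled component_base_config.yaml by default
+ configs = load_component_configurations()
+ ```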
+
+ ## Unit Tests
+
+ To run unit tests:
+
+ ```bash
+ python -m unittest tests.test_synapse_and_soma_models
+ ```
+
+ ## Publications
+
+ [Date, Prasanna, Chathika Gunaratne, Shruti R. Kulkarni, Robert Patton, Mark Coletti, and Thomas Potok. "SuperNeuro: A fast and scalable simulator for neuromorphic computing." In Proceedings of the 2023 International Conference on Neuromorphic Systems, pp. 1-4. 2023.](https://dl.acm.org/doi/abs/10.1145/3589737.3606000)
+
+ ## License
+
+ BSD-3-Clause License - Oak Ridge National Laboratory
superneuroabm-1.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ superneuroabm/__init__.py,sha256=Iai-c25bZkqC-wMZ8wjYwd524ZpliVV9rM_ZTnIPSMU,96
+ superneuroabm/component_base_config.yaml,sha256=g8Mhejh_aNelqDZ6hhOCs9keEZj9KZhB-mzpukgr1Bk,4858
+ superneuroabm/model.py,sha256=TsN1-ehGir8Gc3xKS6ObfyxxJMLFVvGu0jJ6tJhNL_A,29711
+ superneuroabm/util.py,sha256=lAS9Ui42F5w0bIRQ4_TKHymwK3IhmYj3b9oJzcBIU8I,1236
+ superneuroabm/io/__init__.py,sha256=ml5mbnr9Y74h2ACu959i5XbWzEFefBojeIhPtQFJGNk,50
+ superneuroabm/io/nx.py,sha256=0WyuWRIYLRbvNgTGIMiNWDIQ-Cb5SXzsrKk4_SbV8x4,15966
+ superneuroabm/io/synthetic_networks.py,sha256=TO5VnXGGk8QmAQabjgsuzALYAKj3EUBwkZL8p9s5icI,32649
+ superneuroabm/step_functions/soma/izh.py,sha256=uZrZRII0b2SrqCTpxpVixapdeG9CuYwTod2alAyRvOw,2983
+ superneuroabm/step_functions/soma/lif.py,sha256=Wjq03h6dhoeUUKU0qAHP-8wNauNSm-gq_9c7IZJymss,3740
+ superneuroabm/step_functions/soma/lif_soma_adaptive_thr.py,sha256=cf5ShkcV0L8pYaBRnDkB9YtmWzT2YWwqmBzBIFpVyz8,4517
+ superneuroabm/step_functions/synapse/single_exp.py,sha256=CJu27OeyEtXr9IDva4zPggbLVaF3ast9pgUtymlIdJc,2059
+ superneuroabm/step_functions/synapse/util.py,sha256=YDhy8BiYEfhX7JLXG_y-XlrSTVAe-OBiphkrRrCdjuQ,1855
+ superneuroabm/step_functions/synapse/stdp/Low_resolution_synapse.py,sha256=CnqanSudAPM3jRec46IZ4YM-NebYA6y8shyj9BiV3P8,4729
+ superneuroabm/step_functions/synapse/stdp/Three-bit_exp_pair_wise.py,sha256=wYQGxMkVOQfH-GQrSBCaMV7XvczHyPON7FMzAlbi4wA,5578
+ superneuroabm/step_functions/synapse/stdp/Three_bit_exp_pair_wise.py,sha256=DRPVcBUanL8rZNtkpTmltMcPSd_v8GVVqytKSjhf7b4,5919
+ superneuroabm/step_functions/synapse/stdp/exp_pair_wise_stdp.py,sha256=IgktItPgVLN3CvwG0OhrYrBPp9ap7MErbk3t80hY8o4,5128
+ superneuroabm/step_functions/synapse/stdp/learning_rule_selector.py,sha256=UTFZLNiBE7Jk9woqbcfabIdTp-1g0CviXlMAgtF4sbk,2075
+ superneuroabm-1.0.0.dist-info/licenses/LICENSE,sha256=fbF_nc2RGJWubxK8nWWphBUDBLT1TsdqSydSzTHUZGA,1544
+ superneuroabm-1.0.0.dist-info/METADATA,sha256=-2a9L5KntOGo5hnzpXBIO6flUG_-aprf9y3u9J5lwc4,3280
+ superneuroabm-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ superneuroabm-1.0.0.dist-info/top_level.txt,sha256=8ZFp3qGDYMaQXEzM4vDONgYPC-MCSUWrWamYPSplAGw,14
+ superneuroabm-1.0.0.dist-info/RECORD,,
superneuroabm-1.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
superneuroabm-1.0.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,28 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2023, Oak Ridge National Laboratory
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
superneuroabm-1.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ superneuroabm