brainstate-0.0.1-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brainstate/__init__.py +45 -0
- brainstate/_module.py +1466 -0
- brainstate/_module_test.py +133 -0
- brainstate/_state.py +378 -0
- brainstate/_state_test.py +41 -0
- brainstate/_utils.py +21 -0
- brainstate/environ.py +375 -0
- brainstate/functional/__init__.py +25 -0
- brainstate/functional/_activations.py +754 -0
- brainstate/functional/_normalization.py +69 -0
- brainstate/functional/_spikes.py +90 -0
- brainstate/init/__init__.py +26 -0
- brainstate/init/_base.py +36 -0
- brainstate/init/_generic.py +175 -0
- brainstate/init/_random_inits.py +489 -0
- brainstate/init/_regular_inits.py +109 -0
- brainstate/math/__init__.py +21 -0
- brainstate/math/_einops.py +787 -0
- brainstate/math/_einops_parsing.py +169 -0
- brainstate/math/_einops_parsing_test.py +126 -0
- brainstate/math/_einops_test.py +346 -0
- brainstate/math/_misc.py +298 -0
- brainstate/math/_misc_test.py +58 -0
- brainstate/mixin.py +373 -0
- brainstate/mixin_test.py +73 -0
- brainstate/nn/__init__.py +68 -0
- brainstate/nn/_base.py +248 -0
- brainstate/nn/_connections.py +686 -0
- brainstate/nn/_dynamics.py +406 -0
- brainstate/nn/_elementwise.py +1437 -0
- brainstate/nn/_misc.py +132 -0
- brainstate/nn/_normalizations.py +389 -0
- brainstate/nn/_others.py +100 -0
- brainstate/nn/_poolings.py +1228 -0
- brainstate/nn/_poolings_test.py +231 -0
- brainstate/nn/_projection/__init__.py +32 -0
- brainstate/nn/_projection/_align_post.py +528 -0
- brainstate/nn/_projection/_align_pre.py +599 -0
- brainstate/nn/_projection/_delta.py +241 -0
- brainstate/nn/_projection/_utils.py +17 -0
- brainstate/nn/_projection/_vanilla.py +101 -0
- brainstate/nn/_rate_rnns.py +393 -0
- brainstate/nn/_readout.py +130 -0
- brainstate/nn/_synouts.py +166 -0
- brainstate/nn/functional/__init__.py +25 -0
- brainstate/nn/functional/_activations.py +754 -0
- brainstate/nn/functional/_normalization.py +69 -0
- brainstate/nn/functional/_spikes.py +90 -0
- brainstate/nn/init/__init__.py +26 -0
- brainstate/nn/init/_base.py +36 -0
- brainstate/nn/init/_generic.py +175 -0
- brainstate/nn/init/_random_inits.py +489 -0
- brainstate/nn/init/_regular_inits.py +109 -0
- brainstate/nn/surrogate.py +1740 -0
- brainstate/optim/__init__.py +23 -0
- brainstate/optim/_lr_scheduler.py +486 -0
- brainstate/optim/_lr_scheduler_test.py +36 -0
- brainstate/optim/_sgd_optimizer.py +1148 -0
- brainstate/random.py +5148 -0
- brainstate/random_test.py +576 -0
- brainstate/surrogate.py +1740 -0
- brainstate/transform/__init__.py +36 -0
- brainstate/transform/_autograd.py +585 -0
- brainstate/transform/_autograd_test.py +1183 -0
- brainstate/transform/_control.py +665 -0
- brainstate/transform/_controls_test.py +220 -0
- brainstate/transform/_jit.py +239 -0
- brainstate/transform/_jit_error.py +158 -0
- brainstate/transform/_jit_test.py +102 -0
- brainstate/transform/_make_jaxpr.py +573 -0
- brainstate/transform/_make_jaxpr_test.py +133 -0
- brainstate/transform/_progress_bar.py +113 -0
- brainstate/typing.py +69 -0
- brainstate/util.py +747 -0
- brainstate-0.0.1.dist-info/LICENSE +202 -0
- brainstate-0.0.1.dist-info/METADATA +101 -0
- brainstate-0.0.1.dist-info/RECORD +79 -0
- brainstate-0.0.1.dist-info/WHEEL +6 -0
- brainstate-0.0.1.dist-info/top_level.txt +1 -0
brainstate/nn/_dynamics.py
@@ -0,0 +1,406 @@
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# -*- coding: utf-8 -*-

from __future__ import annotations

from typing import Callable, Optional

import jax
import jax.numpy as jnp

from ._base import ExplicitInOutSize
from ._misc import exp_euler_step
from .. import environ, init, surrogate
from .._module import Dynamics
from .._state import ShortTermState
from ..mixin import DelayedInit, Mode, AlignPost
from ..typing import DTypeLike, ArrayLike, Size

__all__ = [
    # neuron models
    'Neuron', 'IF', 'LIF', 'ALIF',

    # synapse models
    'Synapse', 'Expon', 'STP', 'STD',
]


class Neuron(Dynamics, ExplicitInOutSize, DelayedInit):
    """
    Base class for neuronal dynamics.

    Note here we use the ``ExplicitInOutSize`` mixin to explicitly specify the input and output shape.

    Moreover, all neuron models are differentiable since they use surrogate gradient functions to
    generate the spiking state.
    """
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        in_size: Size,
        keep_size: bool = False,
        spk_fun: Callable = surrogate.InvSquareGrad(),
        spk_dtype: DTypeLike = None,
        spk_reset: str = 'soft',
        detach_spk: bool = False,
        mode: Optional[Mode] = None,
        name: Optional[str] = None,
    ):
        super().__init__(in_size, keep_size=keep_size, mode=mode, name=name)
        self.in_size = tuple(self.varshape)
        self.out_size = tuple(self.varshape)
        self.spk_reset = spk_reset
        self.spk_dtype = spk_dtype
        self.spk_fun = spk_fun
        self.detach_spk = detach_spk

    def get_spike(self, *args, **kwargs):
        raise NotImplementedError
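
The base class above delegates spike generation to `spk_fun`, a surrogate-gradient function, which is what makes the docstring's claim of differentiability work: the forward pass produces a (near-)binary spike, while the backward pass substitutes a smooth pseudo-derivative. The sketch below is a generic illustration of that idea in plain JAX, not brainstate's `surrogate.InvSquareGrad` or `surrogate.ReluGrad` (those live in `brainstate/surrogate.py` and may differ in shape and scaling):

```python
# Illustrative sketch only: a generic surrogate spike function in JAX.
import jax
import jax.numpy as jnp


@jax.custom_vjp
def surrogate_spike(v_scaled):
    # Forward: Heaviside step -- spike when the scaled potential crosses 0.
    return (v_scaled >= 0.).astype(v_scaled.dtype)


def _fwd(v_scaled):
    return surrogate_spike(v_scaled), v_scaled


def _bwd(v_scaled, grad_out):
    # Backward: a ReLU-shaped pseudo-derivative, nonzero near the threshold.
    pseudo_grad = jnp.maximum(1. - jnp.abs(v_scaled), 0.)
    return (grad_out * pseudo_grad,)


surrogate_spike.defvjp(_fwd, _bwd)

# The gradient now flows through the spiking nonlinearity:
print(jax.grad(lambda v: surrogate_spike((v - 1.0) / 1.0))(0.8))  # ~0.8
```
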


class IF(Neuron):
    """Integrate-and-fire neuron model."""
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        in_size: Size,
        keep_size: bool = False,
        tau: ArrayLike = 5.,
        V_th: ArrayLike = 1.,
        spk_fun: Callable = surrogate.ReluGrad(),
        spk_dtype: DTypeLike = None,
        spk_reset: str = 'soft',
        mode: Mode = None,
        name: str = None,
    ):
        super().__init__(in_size, keep_size=keep_size, name=name, mode=mode,
                         spk_fun=spk_fun, spk_dtype=spk_dtype, spk_reset=spk_reset)

        # parameters
        self.tau = init.param(tau, self.varshape)
        self.V_th = init.param(V_th, self.varshape)

    def dv(self, v, t, x):
        x = self.sum_current_inputs(v, init=x)
        return (-v + x) / self.tau

    def init_state(self, batch_size: int = None, **kwargs):
        self.V = ShortTermState(init.param(jnp.zeros, self.varshape, batch_size))

    def get_spike(self, V=None):
        V = self.V.value if V is None else V
        v_scaled = (V - self.V_th) / self.V_th
        return self.spk_fun(v_scaled)

    def update(self, x=0.):
        # reset
        last_V = self.V.value
        last_spike = self.get_spike(self.V.value)
        V_th = self.V_th if self.spk_reset == 'soft' else jax.lax.stop_gradient(last_V)
        V = last_V - V_th * last_spike
        # membrane potential
        V = exp_euler_step(self.dv, V, environ.get('t'), x)
        V = V + self.sum_delta_inputs()
        self.V.value = V
        return self.get_spike(V)
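
`IF.update` follows a pattern shared by all neuron models in this file: read the previous spike, apply a soft or hard reset, advance the membrane ODE one step with `exp_euler_step`, then add the delta (instantaneous) inputs. `exp_euler_step` is imported from `._misc` and its body is not shown in this hunk; assuming it implements the standard exponential-Euler scheme, a step of the leak equation `dv/dt = (-v + x) / tau` has the closed form sketched below:

```python
# A minimal sketch, not brainstate's exp_euler_step (defined in
# brainstate/nn/_misc.py): one exponential-Euler step of dv/dt = (-v + x) / tau,
# which is exact when the input x is held constant over the step dt.
import jax.numpy as jnp


def exp_euler_leak(v, x, tau, dt):
    decay = jnp.exp(-dt / tau)
    return x + (v - x) * decay


# With tau = 5 ms and dt = 0.1 ms, a membrane at v = 0 driven by x = 2
# moves a small fraction of the way toward its steady state x:
print(exp_euler_leak(0.0, 2.0, 5.0, 0.1))  # ~0.0396
```
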


class LIF(Neuron):
    """Leaky integrate-and-fire neuron model."""
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        in_size: Size,
        keep_size: bool = False,
        tau: ArrayLike = 5.,
        V_th: ArrayLike = 1.,
        V_reset: ArrayLike = 0.,
        V_rest: ArrayLike = 0.,
        spk_fun: Callable = surrogate.ReluGrad(),
        spk_dtype: DTypeLike = None,
        spk_reset: str = 'soft',
        mode: Mode = None,
        name: str = None,
    ):
        super().__init__(in_size,
                         keep_size=keep_size,
                         name=name,
                         mode=mode,
                         spk_fun=spk_fun,
                         spk_dtype=spk_dtype,
                         spk_reset=spk_reset)

        # parameters
        self.tau = init.param(tau, self.varshape)
        self.V_th = init.param(V_th, self.varshape)
        self.V_rest = init.param(V_rest, self.varshape)
        self.V_reset = init.param(V_reset, self.varshape)

    def dv(self, v, t, x):
        x = self.sum_current_inputs(v, init=x)
        return (-v + self.V_rest + x) / self.tau

    def init_state(self, batch_size: int = None, **kwargs):
        self.V = ShortTermState(init.param(init.Constant(self.V_reset), self.varshape, batch_size))

    def get_spike(self, V=None):
        V = self.V.value if V is None else V
        v_scaled = (V - self.V_th) / self.V_th
        return self.spk_fun(v_scaled)

    def update(self, x=0.):
        last_v = self.V.value
        lst_spk = self.get_spike(last_v)
        V_th = self.V_th if self.spk_reset == 'soft' else jax.lax.stop_gradient(last_v)
        V = last_v - (V_th - self.V_reset) * lst_spk
        # membrane potential
        V = exp_euler_step(self.dv, V, environ.get('t'), x) + self.sum_delta_inputs()
        self.V.value = V
        return self.get_spike(V)


class ALIF(Neuron):
    """Adaptive Leaky Integrate-and-Fire (LIF) neuron model."""
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        in_size: Size,
        keep_size: bool = False,
        tau: ArrayLike = 5.,
        tau_a: ArrayLike = 100.,
        V_th: ArrayLike = 1.,
        beta: ArrayLike = 0.1,
        spk_fun: Callable = surrogate.ReluGrad(),
        spk_dtype: DTypeLike = None,
        spk_reset: str = 'soft',
        mode: Mode = None,
        name: str = None,
    ):
        super().__init__(in_size, keep_size=keep_size, name=name, mode=mode, spk_fun=spk_fun,
                         spk_dtype=spk_dtype, spk_reset=spk_reset)

        # parameters
        self.tau = init.param(tau, self.varshape)
        self.tau_a = init.param(tau_a, self.varshape)
        self.V_th = init.param(V_th, self.varshape)
        self.beta = init.param(beta, self.varshape)

    def dv(self, v, t, x):
        x = self.sum_current_inputs(v, init=x)
        return (-v + x) / self.tau

    def da(self, a, t):
        return -a / self.tau_a

    def init_state(self, batch_size: int = None, **kwargs):
        self.V = ShortTermState(init.param(init.Constant(0.), self.varshape, batch_size))
        self.a = ShortTermState(init.param(init.Constant(0.), self.varshape, batch_size))

    def get_spike(self, V=None, a=None):
        V = self.V.value if V is None else V
        a = self.a.value if a is None else a
        v_scaled = (V - self.V_th - self.beta * a) / self.V_th
        return self.spk_fun(v_scaled)

    def update(self, x=0.):
        last_v = self.V.value
        last_a = self.a.value
        lst_spk = self.get_spike(last_v, last_a)
        V_th = self.V_th if self.spk_reset == 'soft' else jax.lax.stop_gradient(last_v)
        V = last_v - V_th * lst_spk
        a = last_a + lst_spk
        # membrane potential
        V = exp_euler_step(self.dv, V, environ.get('t'), x)
        a = exp_euler_step(self.da, a, environ.get('t'))
        self.V.value = V + self.sum_delta_inputs()
        self.a.value = a
        return self.get_spike(self.V.value, self.a.value)


class Synapse(Dynamics, AlignPost, DelayedInit):
    """
    Base class for synapse dynamics.
    """
    __module__ = 'brainstate.nn'


class Expon(Synapse):
    r"""Exponential decay synapse model.

    Args:
      tau: float. The time constant of decay. [ms]
      %s
    """
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        size: Size,
        keep_size: bool = False,
        name: Optional[str] = None,
        mode: Optional[Mode] = None,
        tau: ArrayLike = 8.0,
    ):
        super().__init__(
            name=name,
            mode=mode,
            size=size,
            keep_size=keep_size
        )

        # parameters
        self.tau = init.param(tau, self.varshape)

    def dg(self, g, t):
        return -g / self.tau

    def init_state(self, batch_size: int = None, **kwargs):
        self.g = ShortTermState(init.param(init.Constant(0.), self.varshape, batch_size))

    def update(self, x=None):
        self.g.value = exp_euler_step(self.dg, self.g.value, environ.get('t'))
        if x is not None:
            self.align_post_input_add(x)
        return self.g.value

    def align_post_input_add(self, x):
        self.g.value += x

    def return_info(self):
        return self.g


class STP(Synapse):
    r"""Synaptic output with short-term plasticity.

    %s

    Args:
      tau_f: float, ArrayType, Callable. The time constant of short-term facilitation.
      tau_d: float, ArrayType, Callable. The time constant of short-term depression.
      U: float, ArrayType, Callable. The fraction of resources used per action potential.
      %s
    """
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        size: Size,
        keep_size: bool = False,
        name: Optional[str] = None,
        mode: Optional[Mode] = None,
        U: ArrayLike = 0.15,
        tau_f: ArrayLike = 1500.,
        tau_d: ArrayLike = 200.,
    ):
        super().__init__(name=name,
                         mode=mode,
                         size=size,
                         keep_size=keep_size)

        # parameters
        self.tau_f = init.param(tau_f, self.varshape)
        self.tau_d = init.param(tau_d, self.varshape)
        self.U = init.param(U, self.varshape)

    def init_state(self, batch_size: int = None, **kwargs):
        self.x = ShortTermState(init.param(init.Constant(1.), self.varshape, batch_size))
        self.u = ShortTermState(init.param(init.Constant(self.U), self.varshape, batch_size))

    def du(self, u, t):
        return self.U - u / self.tau_f

    def dx(self, x, t):
        return (1 - x) / self.tau_d

    def update(self, pre_spike):
        t = environ.get('t')
        u = exp_euler_step(self.du, self.u.value, t)
        x = exp_euler_step(self.dx, self.x.value, t)

        # --- original code:
        # if pre_spike.dtype == jax.numpy.bool_:
        #   u = bm.where(pre_spike, u + self.U * (1 - self.u), u)
        #   x = bm.where(pre_spike, x - u * self.x, x)
        # else:
        #   u = pre_spike * (u + self.U * (1 - self.u)) + (1 - pre_spike) * u
        #   x = pre_spike * (x - u * self.x) + (1 - pre_spike) * x

        # --- simplified code:
        u = u + pre_spike * self.U * (1 - self.u.value)
        x = x - pre_spike * u * self.x.value

        self.u.value = u
        self.x.value = x
        return u * x
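
The commented-out `bm.where` branches are the original BrainPy-style formulation; the shipped arithmetic form gives the same result for 0/1 spikes and also accepts fractional surrogate spikes. Numerically, with the class defaults, a single spike facilitates `u` by `U * (1 - u)` and then consumes a fraction `u` of the available resources `x`:

```python
# A standalone numeric sketch of the spike-triggered part of the update above,
# using the class defaults U = 0.15, u = 0.15, x = 1.0. Illustrative only.
import jax.numpy as jnp

U = 0.15
u, x = jnp.asarray(0.15), jnp.asarray(1.0)   # facilitation and available resources
pre_spike = jnp.asarray(1.0)                 # a (possibly surrogate) spike in [0, 1]

u_new = u + pre_spike * U * (1. - u)         # facilitation: u jumps toward 1
x_new = x - pre_spike * u_new * x            # depression: a fraction u_new of x is used
out = u_new * x_new                          # synaptic efficacy passed downstream

print(u_new, x_new, out)   # 0.2775, 0.7225, ~0.2005
```
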


class STD(Synapse):
    r"""Synaptic output with short-term depression.

    %s

    Args:
      tau: float, ArrayType, Callable. The time constant of recovery of the synaptic vesicles.
      U: float, ArrayType, Callable. The fraction of resources used per action potential.
      %s
    """
    __module__ = 'brainstate.nn'

    def __init__(
        self,
        size: Size,
        keep_size: bool = False,
        name: Optional[str] = None,
        mode: Optional[Mode] = None,
        # synapse parameters
        tau: ArrayLike = 200.,
        U: ArrayLike = 0.07,
    ):
        super().__init__(name=name,
                         mode=mode,
                         size=size,
                         keep_size=keep_size)

        # parameters
        self.tau = init.param(tau, self.varshape)
        self.U = init.param(U, self.varshape)

    def dx(self, x, t):
        return (1 - x) / self.tau

    def init_state(self, batch_size: int = None, **kwargs):
        self.x = ShortTermState(init.param(init.Constant(1.), self.varshape, batch_size))

    def update(self, pre_spike):
        t = environ.get('t')
        x = exp_euler_step(self.dx, self.x.value, t)

        # --- original code:
        # self.x.value = bm.where(pre_spike, x - self.U * self.x, x)

        # --- simplified code:
        self.x.value = x - pre_spike * self.U * self.x.value

        return self.x.value

    def return_info(self):
        return self.x
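
The same simplification is applied in `STD.update`: the masked (`bm.where`) update survives only as a comment, and the arithmetic version is used because it behaves identically for binary spikes while remaining well-defined for fractional surrogate spikes. A quick equivalence check in plain JAX (illustrative, not using the brainstate API):

```python
# Illustrative check that the simplified STD update matches the commented-out
# masked form for binary spikes (U = 0.07, as in the class default).
import jax.numpy as jnp

U = 0.07
x = jnp.asarray([1.0, 0.6])             # available resources for two synapses
spike = jnp.asarray([1.0, 0.0])         # first synapse receives a spike

simplified = x - spike * U * x                        # arithmetic form used above
masked = jnp.where(spike.astype(bool), x - U * x, x)  # original bm.where-style form

print(simplified)                        # [0.93 0.6 ]
print(jnp.allclose(simplified, masked))  # True
```
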