brainstate 0.0.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. brainstate/__init__.py +45 -0
  2. brainstate/_module.py +1466 -0
  3. brainstate/_module_test.py +133 -0
  4. brainstate/_state.py +378 -0
  5. brainstate/_state_test.py +41 -0
  6. brainstate/_utils.py +21 -0
  7. brainstate/environ.py +375 -0
  8. brainstate/functional/__init__.py +25 -0
  9. brainstate/functional/_activations.py +754 -0
  10. brainstate/functional/_normalization.py +69 -0
  11. brainstate/functional/_spikes.py +90 -0
  12. brainstate/init/__init__.py +26 -0
  13. brainstate/init/_base.py +36 -0
  14. brainstate/init/_generic.py +175 -0
  15. brainstate/init/_random_inits.py +489 -0
  16. brainstate/init/_regular_inits.py +109 -0
  17. brainstate/math/__init__.py +21 -0
  18. brainstate/math/_einops.py +787 -0
  19. brainstate/math/_einops_parsing.py +169 -0
  20. brainstate/math/_einops_parsing_test.py +126 -0
  21. brainstate/math/_einops_test.py +346 -0
  22. brainstate/math/_misc.py +298 -0
  23. brainstate/math/_misc_test.py +58 -0
  24. brainstate/mixin.py +373 -0
  25. brainstate/mixin_test.py +73 -0
  26. brainstate/nn/__init__.py +68 -0
  27. brainstate/nn/_base.py +248 -0
  28. brainstate/nn/_connections.py +686 -0
  29. brainstate/nn/_dynamics.py +406 -0
  30. brainstate/nn/_elementwise.py +1437 -0
  31. brainstate/nn/_misc.py +132 -0
  32. brainstate/nn/_normalizations.py +389 -0
  33. brainstate/nn/_others.py +100 -0
  34. brainstate/nn/_poolings.py +1228 -0
  35. brainstate/nn/_poolings_test.py +231 -0
  36. brainstate/nn/_projection/__init__.py +32 -0
  37. brainstate/nn/_projection/_align_post.py +528 -0
  38. brainstate/nn/_projection/_align_pre.py +599 -0
  39. brainstate/nn/_projection/_delta.py +241 -0
  40. brainstate/nn/_projection/_utils.py +17 -0
  41. brainstate/nn/_projection/_vanilla.py +101 -0
  42. brainstate/nn/_rate_rnns.py +393 -0
  43. brainstate/nn/_readout.py +130 -0
  44. brainstate/nn/_synouts.py +166 -0
  45. brainstate/nn/functional/__init__.py +25 -0
  46. brainstate/nn/functional/_activations.py +754 -0
  47. brainstate/nn/functional/_normalization.py +69 -0
  48. brainstate/nn/functional/_spikes.py +90 -0
  49. brainstate/nn/init/__init__.py +26 -0
  50. brainstate/nn/init/_base.py +36 -0
  51. brainstate/nn/init/_generic.py +175 -0
  52. brainstate/nn/init/_random_inits.py +489 -0
  53. brainstate/nn/init/_regular_inits.py +109 -0
  54. brainstate/nn/surrogate.py +1740 -0
  55. brainstate/optim/__init__.py +23 -0
  56. brainstate/optim/_lr_scheduler.py +486 -0
  57. brainstate/optim/_lr_scheduler_test.py +36 -0
  58. brainstate/optim/_sgd_optimizer.py +1148 -0
  59. brainstate/random.py +5148 -0
  60. brainstate/random_test.py +576 -0
  61. brainstate/surrogate.py +1740 -0
  62. brainstate/transform/__init__.py +36 -0
  63. brainstate/transform/_autograd.py +585 -0
  64. brainstate/transform/_autograd_test.py +1183 -0
  65. brainstate/transform/_control.py +665 -0
  66. brainstate/transform/_controls_test.py +220 -0
  67. brainstate/transform/_jit.py +239 -0
  68. brainstate/transform/_jit_error.py +158 -0
  69. brainstate/transform/_jit_test.py +102 -0
  70. brainstate/transform/_make_jaxpr.py +573 -0
  71. brainstate/transform/_make_jaxpr_test.py +133 -0
  72. brainstate/transform/_progress_bar.py +113 -0
  73. brainstate/typing.py +69 -0
  74. brainstate/util.py +747 -0
  75. brainstate-0.0.1.dist-info/LICENSE +202 -0
  76. brainstate-0.0.1.dist-info/METADATA +101 -0
  77. brainstate-0.0.1.dist-info/RECORD +79 -0
  78. brainstate-0.0.1.dist-info/WHEEL +6 -0
  79. brainstate-0.0.1.dist-info/top_level.txt +1 -0
brainstate/nn/_rate_rnns.py
@@ -0,0 +1,393 @@
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ from typing import Callable, Union
+
+ import jax.numpy as jnp
+
+ from ._base import ExplicitInOutSize
+ from ._connections import Linear
+ from .. import random, init, functional
+ from .._module import Module
+ from .._state import ShortTermState, ParamState
+ from ..mixin import DelayedInit, Mode
+ from ..typing import ArrayLike
+
+ __all__ = [
+   'RNNCell', 'ValinaRNNCell', 'GRUCell', 'MGUCell', 'LSTMCell', 'URLSTMCell',
+ ]
+
+
+ class RNNCell(Module, ExplicitInOutSize, DelayedInit):
+   """
+   Base class for RNN cells.
+   """
+   pass
+
+
+ class ValinaRNNCell(RNNCell):
+   """
+   Vanilla RNN cell.
+
+   Args:
+     num_in: int. The number of input units.
+     num_out: int. The number of hidden units.
+     state_init: callable, ArrayLike. The state initializer.
+     w_init: callable, ArrayLike. The input weight initializer.
+     b_init: optional, callable, ArrayLike. The bias weight initializer.
+     activation: str, callable. The activation function. It can be a string or a callable function.
+     mode: optional, Mode. The mode of the module.
+     name: optional, str. The name of the module.
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       num_in: int,
+       num_out: int,
+       state_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       w_init: Union[ArrayLike, Callable] = init.XavierNormal(),
+       b_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       activation: str | Callable = 'relu',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self._state_initializer = state_init
+     self.num_out = num_out
+     self.num_in = num_in
+     self.in_size = (num_in,)
+     self.out_size = (num_out,)
+
+     # activation function
+     if isinstance(activation, str):
+       self.activation = getattr(functional, activation)
+     else:
+       assert callable(activation), "The activation function should be a string or a callable function."
+       self.activation = activation
+
+     # weights
+     self.W = Linear(num_in + num_out, num_out, w_init=w_init, b_init=b_init, name=self.name + '_W')
+
+   def init_state(self, batch_size: int = None, **kwargs):
+     self.h = ShortTermState(init.param(self._state_initializer, self.num_out, batch_size))
+
+   def update(self, x):
+     xh = jnp.concatenate([x, self.h.value], axis=-1)
+     h = self.W(xh)
+     self.h.value = self.activation(h)
+     return self.h.value
+
+
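A minimal usage sketch for the cell above (assumes the wheel is installed and importable as brainstate; names follow the definitions in this file):

    import jax.numpy as jnp
    import brainstate as bst

    cell = bst.nn.ValinaRNNCell(num_in=4, num_out=8)
    cell.init_state(batch_size=2)   # allocates the hidden state h, shape (2, 8)
    x = jnp.ones((2, 4))            # one input step for a batch of 2
    h = cell.update(x)              # h.shape == (2, 8)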
+ class GRUCell(RNNCell):
+   """
+   Gated Recurrent Unit (GRU) cell.
+
+   Args:
+     num_in: int. The number of input units.
+     num_out: int. The number of hidden units.
+     state_init: callable, ArrayLike. The state initializer.
+     w_init: callable, ArrayLike. The input weight initializer.
+     b_init: optional, callable, ArrayLike. The bias weight initializer.
+     activation: str, callable. The activation function. It can be a string or a callable function.
+     mode: optional, Mode. The mode of the module.
+     name: optional, str. The name of the module.
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       num_in: int,
+       num_out: int,
+       w_init: Union[ArrayLike, Callable] = init.Orthogonal(),
+       b_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       state_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       activation: str | Callable = 'tanh',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self._state_initializer = state_init
+     self.num_out = num_out
+     self.num_in = num_in
+     self.in_size = (num_in,)
+     self.out_size = (num_out,)
+
+     # activation function
+     if isinstance(activation, str):
+       self.activation = getattr(functional, activation)
+     else:
+       assert callable(activation), "The activation function should be a string or a callable function."
+       self.activation = activation
+
+     # weights
+     self.Wrz = Linear(num_in + num_out, num_out * 2, w_init=w_init, b_init=b_init, name=self.name + '_Wrz')
+     self.Wh = Linear(num_in + num_out, num_out, w_init=w_init, b_init=b_init, name=self.name + '_Wh')
+
+   def init_state(self, batch_size: int = None, **kwargs):
+     self.h = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+
+   def update(self, x):
+     old_h = self.h.value
+     xh = jnp.concatenate([x, old_h], axis=-1)
+     r, z = jnp.split(functional.sigmoid(self.Wrz(xh)), indices_or_sections=2, axis=-1)
+     rh = r * old_h
+     h = self.activation(self.Wh(jnp.concatenate([x, rh], axis=-1)))
+     h = (1 - z) * old_h + z * h
+     self.h.value = h
+     return h
+
+
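The reset and update gates come from one fused projection: Wrz maps num_in + num_out inputs to 2 * num_out outputs, which jnp.split then separates. A quick shape check of that step (a standalone sketch, independent of the package):

    import jax.numpy as jnp

    num_out, batch = 8, 2
    fused = jnp.zeros((batch, 2 * num_out))   # stands in for sigmoid(Wrz(xh))
    r, z = jnp.split(fused, indices_or_sections=2, axis=-1)
    assert r.shape == z.shape == (batch, num_out)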
+ class MGUCell(RNNCell):
+   r"""
+   Minimal Gated Recurrent Unit (MGU) cell.
+
+   .. math::
+
+      \begin{aligned}
+      f_{t} &= \sigma(W_{f} x_{t} + U_{f} h_{t-1} + b_{f}) \\
+      \hat{h}_{t} &= \phi(W_{h} x_{t} + U_{h} (f_{t} \odot h_{t-1}) + b_{h}) \\
+      h_{t} &= (1 - f_{t}) \odot h_{t-1} + f_{t} \odot \hat{h}_{t}
+      \end{aligned}
+
+   where:
+
+   - :math:`x_{t}`: input vector
+   - :math:`h_{t}`: output vector
+   - :math:`\hat{h}_{t}`: candidate activation vector
+   - :math:`f_{t}`: forget vector
+   - :math:`W, U, b`: parameter matrices and vectors
+
+   Args:
+     num_in: int. The number of input units.
+     num_out: int. The number of hidden units.
+     state_init: callable, ArrayLike. The state initializer.
+     w_init: callable, ArrayLike. The input weight initializer.
+     b_init: optional, callable, ArrayLike. The bias weight initializer.
+     activation: str, callable. The activation function. It can be a string or a callable function.
+     mode: optional, Mode. The mode of the module.
+     name: optional, str. The name of the module.
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       num_in: int,
+       num_out: int,
+       w_init: Union[ArrayLike, Callable] = init.Orthogonal(),
+       b_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       state_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       activation: str | Callable = 'tanh',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self._state_initializer = state_init
+     self.num_out = num_out
+     self.num_in = num_in
+     self.in_size = (num_in,)
+     self.out_size = (num_out,)
+
+     # activation function
+     if isinstance(activation, str):
+       self.activation = getattr(functional, activation)
+     else:
+       assert callable(activation), "The activation function should be a string or a callable function."
+       self.activation = activation
+
+     # weights
+     self.Wf = Linear(num_in + num_out, num_out, w_init=w_init, b_init=b_init, name=self.name + '_Wf')
+     self.Wh = Linear(num_in + num_out, num_out, w_init=w_init, b_init=b_init, name=self.name + '_Wh')
+
+   def init_state(self, batch_size: int = None, **kwargs):
+     self.h = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+
+   def update(self, x):
+     old_h = self.h.value
+     xh = jnp.concatenate([x, old_h], axis=-1)
+     f = functional.sigmoid(self.Wf(xh))
+     fh = f * old_h
+     h = self.activation(self.Wh(jnp.concatenate([x, fh], axis=-1)))
+     self.h.value = (1 - f) * self.h.value + f * h
+     return self.h.value
+
+
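The MGU reuses a single forget gate both to gate the previous state inside the candidate and to interpolate between old and new states, so it needs two linear maps where the GRU above needs three gate blocks. A pure-JAX sketch of one step, with hypothetical random weights standing in for Wf and Wh (biases omitted):

    import jax
    import jax.numpy as jnp

    num_in, num_out = 4, 8
    k1, k2 = jax.random.split(jax.random.PRNGKey(0))
    Wf = jax.random.normal(k1, (num_in + num_out, num_out))
    Wh = jax.random.normal(k2, (num_in + num_out, num_out))

    def mgu_step(h, x):
        f = jax.nn.sigmoid(jnp.concatenate([x, h], axis=-1) @ Wf)    # forget gate
        h_hat = jnp.tanh(jnp.concatenate([x, f * h], axis=-1) @ Wh)  # candidate
        return (1 - f) * h + f * h_hat                               # interpolation

    h = mgu_step(jnp.zeros(num_out), jnp.ones(num_in))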
+ class LSTMCell(RNNCell):
+   r"""Long short-term memory (LSTM) RNN core.
+
+   The implementation is based on (Zaremba et al., 2014) [1]_. Given
+   :math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})`, the core
+   computes
+
+   .. math::
+
+      \begin{array}{ll}
+      i_t = \sigma(W_{ii} x_t + W_{hi} h_{t-1} + b_i) \\
+      f_t = \sigma(W_{if} x_t + W_{hf} h_{t-1} + b_f) \\
+      g_t = \tanh(W_{ig} x_t + W_{hg} h_{t-1} + b_g) \\
+      o_t = \sigma(W_{io} x_t + W_{ho} h_{t-1} + b_o) \\
+      c_t = f_t c_{t-1} + i_t g_t \\
+      h_t = o_t \tanh(c_t)
+      \end{array}
+
+   where :math:`i_t`, :math:`f_t`, :math:`o_t` are input, forget and
+   output gate activations, and :math:`g_t` is a vector of cell updates.
+
+   The output is equal to the new hidden state, :math:`h_t`.
+
+   Notes
+   -----
+   Forget gate initialization: following (Jozefowicz et al., 2015) [2]_, we add 1.0
+   to :math:`b_f` after initialization in order to reduce the scale of forgetting at
+   the beginning of training.
+
+   Parameters
+   ----------
+   num_in: int
+     The dimension of the input vector.
+   num_out: int
+     The number of hidden units in the node.
+   state_init: callable, ArrayLike
+     The state initializer.
+   w_init: callable, ArrayLike
+     The input weight initializer.
+   b_init: optional, callable, ArrayLike
+     The bias weight initializer.
+   activation: str, callable
+     The activation function. It can be a string or a callable function.
+
+   References
+   ----------
+   .. [1] Zaremba, Wojciech, Ilya Sutskever, and Oriol Vinyals. "Recurrent neural
+          network regularization." arXiv preprint arXiv:1409.2329 (2014).
+   .. [2] Jozefowicz, Rafal, Wojciech Zaremba, and Ilya Sutskever. "An empirical
+          exploration of recurrent network architectures." In International conference
+          on machine learning, pp. 2342-2350. PMLR, 2015.
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       num_in: int,
+       num_out: int,
+       w_init: Union[ArrayLike, Callable] = init.XavierNormal(),
+       b_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       state_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       activation: str | Callable = 'tanh',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self.num_out = num_out
+     self.num_in = num_in
+     self.in_size = (num_in,)
+     self.out_size = (num_out,)
+
+     # initializers
+     self._state_initializer = state_init
+
+     # activation function
+     if isinstance(activation, str):
+       self.activation = getattr(functional, activation)
+     else:
+       assert callable(activation), "The activation function should be a string or a callable function."
+       self.activation = activation
+
+     # weights
+     self.W = Linear(num_in + num_out, num_out * 4, w_init=w_init, b_init=b_init, name=self.name + '_W')
+
+   def init_state(self, batch_size: int = None, **kwargs):
+     self.c = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+     self.h = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+
+   def update(self, x):
+     h, c = self.h.value, self.c.value
+     xh = jnp.concat([x, h], axis=-1)
+     i, g, f, o = jnp.split(self.W(xh), indices_or_sections=4, axis=-1)
+     c = functional.sigmoid(f + 1.) * c + functional.sigmoid(i) * self.activation(g)
+     h = functional.sigmoid(o) * self.activation(c)
+     self.h.value = h
+     self.c.value = c
+     return h
+
+
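A usage sketch for running the cell over a short sequence (assumes the wheel is importable as brainstate; a plain Python loop is used here, though the brainstate.transform module also provides scan-style control flow):

    import jax.numpy as jnp
    import brainstate as bst

    cell = bst.nn.LSTMCell(num_in=4, num_out=8)
    cell.init_state(batch_size=2)     # allocates h and c, each of shape (2, 8)
    xs = jnp.ones((10, 2, 4))         # (time, batch, features)
    for t in range(xs.shape[0]):
        h = cell.update(xs[t])        # h.shape == (2, 8)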
+ class URLSTMCell(RNNCell):
+   def __init__(
+       self,
+       num_in: int,
+       num_out: int,
+       w_init: Union[ArrayLike, Callable] = init.XavierNormal(),
+       state_init: Union[ArrayLike, Callable] = init.ZeroInit(),
+       activation: str | Callable = 'tanh',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self.num_out = num_out
+     self.num_in = num_in
+     self.in_size = (num_in,)
+     self.out_size = (num_out,)
+
+     # initializers
+     self._state_initializer = state_init
+
+     # activation function
+     if isinstance(activation, str):
+       self.activation = getattr(functional, activation)
+     else:
+       assert callable(activation), "The activation function should be a string or a callable function."
+       self.activation = activation
+
+     # weights
+     self.W = Linear(num_in + num_out, num_out * 4, w_init=w_init, b_init=None, name=self.name + '_Wg')
+     self.bias = ParamState(self._forget_bias())
+
+   def _forget_bias(self):
+     u = random.uniform(1 / self.num_out, 1 - 1 / self.num_out, (self.num_out,))
+     return -jnp.log(1 / u - 1)
+
+   def init_state(self, batch_size: int = None, **kwargs):
+     self.c = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+     self.h = ShortTermState(init.param(self._state_initializer, [self.num_out], batch_size))
+
+   def update(self, x: ArrayLike) -> ArrayLike:
+     h, c = self.h.value, self.c.value
+     xh = jnp.concat([x, h], axis=-1)
+     f, r, u, o = jnp.split(self.W(xh), indices_or_sections=4, axis=-1)
+     f_ = functional.sigmoid(f + self.bias.value)
+     r_ = functional.sigmoid(r - self.bias.value)
+     g = 2 * r_ * f_ + (1 - 2 * r_) * f_ ** 2
+     next_cell = g * c + (1 - g) * self.activation(u)
+     next_hidden = functional.sigmoid(o) * self.activation(next_cell)
+     self.h.value = next_hidden
+     self.c.value = next_cell
+     return next_hidden
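URLSTMCell ships without a docstring. Its gate algebra appears to follow the UR-LSTM of Gu et al. (2020), where a refine gate r reshapes the forget gate f: the effective gate g = 2rf + (1 - 2r)f^2 equals f^2 at r = 0, f at r = 0.5, and 1 - (1 - f)^2 at r = 1. The _forget_bias helper stores logit(u) for u drawn uniformly in (1/n, 1 - 1/n), so the initial forget activations are spread uniformly rather than clustered at 0.5. A small check of that logit identity (a sketch, independent of the package):

    import jax
    import jax.numpy as jnp

    n = 8
    u = jnp.linspace(1 / n, 1 - 1 / n, n)   # stands in for the uniform draw
    b = -jnp.log(1 / u - 1)                 # the logit trick from _forget_bias
    assert jnp.allclose(jax.nn.sigmoid(b), u, atol=1e-6)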
brainstate/nn/_readout.py
@@ -0,0 +1,130 @@
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ import numbers
+ from typing import Callable
+
+ import jax
+ import jax.numpy as jnp
+
+ from ._base import DnnLayer
+ from ._dynamics import Neuron
+ from ._misc import exp_euler_step
+ from .. import environ, init, surrogate
+ from .._state import ShortTermState, ParamState
+ from ..mixin import Mode
+ from ..typing import Size, ArrayLike, DTypeLike
+
+ __all__ = [
+   'LeakyRateReadout',
+   'LeakySpikeReadout',
+ ]
+
+
+ class LeakyRateReadout(DnnLayer):
+   """
+   Leaky dynamics for the readout module used in Real-Time Recurrent Learning (RTRL).
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       in_size: Size,
+       out_size: Size,
+       tau: ArrayLike = 5.,
+       w_init: Callable = init.KaimingNormal(),
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(mode=mode, name=name)
+
+     # parameters
+     self.in_size = (in_size,) if isinstance(in_size, numbers.Integral) else tuple(in_size)
+     self.out_size = (out_size,) if isinstance(out_size, numbers.Integral) else tuple(out_size)
+     self.tau = init.param(tau, self.in_size)
+     self.decay = jnp.exp(-environ.get_dt() / self.tau)
+
+     # weights
+     self.weight = ParamState(init.param(w_init, (self.in_size[0], self.out_size[0])))
+
+   def init_state(self, batch_size=None, **kwargs):
+     self.r = ShortTermState(init.param(init.Constant(0.), self.out_size, batch_size))
+
+   def update(self, x):
+     r = self.decay * self.r.value + x @ self.weight.value
+     self.r.value = r
+     return r
+
+
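With a constant drive, the leaky update r <- exp(-dt/tau) * r + x @ W converges to drive / (1 - decay). A numeric check under an assumed dt (in practice environ.get_dt() supplies it):

    import math

    dt, tau = 0.1, 5.0
    decay = math.exp(-dt / tau)
    r, drive = 0.0, 1.0                          # drive stands in for x @ weight
    for _ in range(2000):
        r = decay * r + drive
    assert abs(r - drive / (1 - decay)) < 1e-6   # fixed point of the leaky update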
+ class LeakySpikeReadout(Neuron):
+   """
+   Leaky integrate-and-fire readout neuron model.
+   """
+
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       in_size: Size,
+       keep_size: bool = False,
+       tau: ArrayLike = 5.,
+       V_th: ArrayLike = 1.,
+       w_init: Callable = init.KaimingNormal(),
+       spk_fun: Callable = surrogate.ReluGrad(),
+       spk_dtype: DTypeLike = None,
+       spk_reset: str = 'soft',
+       mode: Mode = None,
+       name: str = None,
+   ):
+     super().__init__(in_size, keep_size=keep_size, name=name, mode=mode,
+                      spk_fun=spk_fun, spk_dtype=spk_dtype, spk_reset=spk_reset)
+
+     # parameters
+     self.tau = init.param(tau, (self.num,))
+     self.V_th = init.param(V_th, (self.num,))
+
+     # weights
+     self.weight = ParamState(init.param(w_init, (self.in_size[0], self.out_size[0])))
+
+   def dv(self, v, t, x):
+     x = self.sum_current_inputs(v, init=x)
+     return (-v + x) / self.tau
+
+   def init_state(self, batch_size, **kwargs):
+     self.V = ShortTermState(init.param(init.Constant(0.), self.varshape, batch_size))
+
+   @property
+   def spike(self):
+     return self.get_spike(self.V.value)
+
+   def get_spike(self, V):
+     v_scaled = (V - self.V_th) / self.V_th
+     return self.spk_fun(v_scaled)
+
+   def update(self, x):
+     # reset
+     last_V = self.V.value
+     last_spike = self.get_spike(last_V)
+     V_th = self.V_th if self.spk_reset == 'soft' else jax.lax.stop_gradient(last_V)
+     V = last_V - V_th * last_spike
+     # membrane potential
+     V = exp_euler_step(self.dv, V, environ.get('t'), x @ self.weight.value)
+     V = V + self.sum_delta_inputs()
+     self.V.value = V
+     return self.get_spike(V)
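In update above, spk_reset selects between a soft reset (subtract V_th on a spike, keeping any overshoot above threshold) and a hard reset (subtract the stopped-gradient membrane value itself, clamping it back to zero). A numeric illustration:

    V_th, last_V, spike = 1.0, 1.3, 1.0
    soft = last_V - V_th * spike     # 0.3: the overshoot survives the reset
    hard = last_V - last_V * spike   # 0.0: the potential is clamped to rest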
brainstate/nn/_synouts.py
@@ -0,0 +1,166 @@
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ from typing import Optional
+
+ import jax.numpy as jnp
+
+ from .._module import Module
+ from ..mixin import DelayedInit, BindCondData
+ from ..typing import ArrayLike
+
+ __all__ = [
+   'SynOut', 'COBA', 'CUBA', 'MgBlock',
+ ]
+
+
+ class SynOut(Module, DelayedInit, BindCondData):
+   """
+   Base class for synaptic outputs.
+
+   :py:class:`~.SynOut` is also a subclass of :py:class:`~.DelayedInit` and :py:class:`~.BindCondData`.
+   """
+
+   __module__ = 'brainstate.nn'
+
+   def __init__(self, name: Optional[str] = None):
+     super().__init__(name=name)
+     self._conductance = None
+
+   def __call__(self, *args, **kwargs):
+     if self._conductance is None:
+       raise ValueError(f'Please first pack conductance data at the current step using '
+                        f'".{BindCondData.bind_cond.__name__}(data)". {self}')
+     ret = self.update(self._conductance, *args, **kwargs)
+     return ret
+
+
+ class COBA(SynOut):
+   r"""
+   Conductance-based synaptic output.
+
+   Given the synaptic conductance, the model outputs the post-synaptic current with
+
+   .. math::
+
+      I_{\mathrm{syn}}(t) = g_{\mathrm{syn}}(t) (E - V(t))
+
+   Parameters
+   ----------
+   E: ArrayLike
+     The reversal potential.
+   name: str
+     The model name.
+
+   See Also
+   --------
+   CUBA
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(self, E: ArrayLike, name: Optional[str] = None):
+     super().__init__(name=name)
+
+     self.E = E
+
+   def update(self, conductance, potential):
+     return conductance * (self.E - potential)
+
+
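The sign of the COBA current follows the driving force E - V: the same conductance is depolarizing when the membrane sits below the reversal potential and hyperpolarizing above it. For instance:

    E_exc, E_inh, V, g = 0.0, -80.0, -65.0, 0.5   # mV, arbitrary conductance units
    I_exc = g * (E_exc - V)   # +32.5: depolarizing
    I_inh = g * (E_inh - V)   # -7.5: hyperpolarizing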
+ class CUBA(SynOut):
+   r"""Current-based synaptic output.
+
+   Given the conductance, this model outputs the post-synaptic current with an identity function:
+
+   .. math::
+
+      I_{\mathrm{syn}}(t) = g_{\mathrm{syn}}(t)
+
+   Parameters
+   ----------
+   name: str
+     The model name.
+
+   See Also
+   --------
+   COBA
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(self, name: Optional[str] = None):
+     super().__init__(name=name)
+
+   def update(self, conductance, potential=None):
+     return conductance
+
+
+ class MgBlock(SynOut):
+   r"""Synaptic output based on magnesium blocking.
+
+   Given the synaptic conductance, the model outputs the post-synaptic current with
+
+   .. math::
+
+      I_{\mathrm{syn}}(t) = g_{\mathrm{syn}}(t) (E - V(t)) g_{\infty}(V, [{Mg}^{2+}]_{o})
+
+   where the fraction of channels :math:`g_{\infty}` that are not blocked by magnesium can be fitted to
+
+   .. math::
+
+      g_{\infty}(V, [{Mg}^{2+}]_{o}) = \left(1 + e^{-\alpha V} \frac{[{Mg}^{2+}]_{o}}{\beta}\right)^{-1}
+
+   Here :math:`[{Mg}^{2+}]_{o}` is the extracellular magnesium concentration.
+
+   Parameters
+   ----------
+   E: ArrayLike
+     The reversal potential for the synaptic current. [mV]
+   alpha: ArrayLike
+     Binding constant. Default 0.062
+   beta: ArrayLike
+     Unbinding constant. Default 3.57
+   cc_Mg: ArrayLike
+     Concentration of Magnesium ion. Default 1.2 [mM].
+   V_offset: ArrayLike
+     The offset potential. Default 0. [mV]
+   name: str
+     The model name.
+   """
+   __module__ = 'brainstate.nn'
+
+   def __init__(
+       self,
+       E: ArrayLike = 0.,
+       cc_Mg: ArrayLike = 1.2,
+       alpha: ArrayLike = 0.062,
+       beta: ArrayLike = 3.57,
+       V_offset: ArrayLike = 0.,
+       name: Optional[str] = None,
+   ):
+     super().__init__(name=name)
+
+     self.E = E
+     self.V_offset = V_offset
+     self.cc_Mg = cc_Mg
+     self.alpha = alpha
+     self.beta = beta
+
+   def update(self, conductance, potential):
+     norm = (1 + self.cc_Mg / self.beta * jnp.exp(self.alpha * (self.V_offset - potential)))
+     return conductance * (self.E - potential) / norm
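Evaluating the blocking fraction at the defaults above shows the characteristic voltage dependence of the magnesium block: nearly fully blocked near rest and largely relieved at depolarized potentials. A quick check (a sketch, independent of the package):

    import math

    alpha, beta, cc_Mg, V_offset = 0.062, 3.57, 1.2, 0.0   # defaults above

    def g_inf(V):
        return 1.0 / (1.0 + cc_Mg / beta * math.exp(alpha * (V_offset - V)))

    print(g_inf(-65.0))   # ~0.05: mostly blocked near rest
    print(g_inf(0.0))     # ~0.75: block largely relieved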
brainstate/nn/functional/__init__.py
@@ -0,0 +1,25 @@
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+
+ from ._activations import *
+ from ._activations import __all__ as __activations_all__
+ from ._normalization import *
+ from ._normalization import __all__ as __others_all__
+ from ._spikes import *
+ from ._spikes import __all__ as __spikes_all__
+
+ __all__ = __spikes_all__ + __others_all__ + __activations_all__
+