brainstate 0.1.10__py2.py3-none-any.whl → 0.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (163)
  1. brainstate/__init__.py +169 -58
  2. brainstate/_compatible_import.py +340 -148
  3. brainstate/_compatible_import_test.py +681 -0
  4. brainstate/_deprecation.py +210 -0
  5. brainstate/_deprecation_test.py +2319 -0
  6. brainstate/{util/error.py → _error.py} +45 -55
  7. brainstate/_state.py +1652 -1605
  8. brainstate/_state_test.py +52 -52
  9. brainstate/_utils.py +47 -47
  10. brainstate/environ.py +1495 -563
  11. brainstate/environ_test.py +1223 -62
  12. brainstate/graph/__init__.py +22 -29
  13. brainstate/graph/_node.py +240 -0
  14. brainstate/graph/_node_test.py +589 -0
  15. brainstate/graph/{_graph_operation.py → _operation.py} +1624 -1738
  16. brainstate/graph/_operation_test.py +1147 -0
  17. brainstate/mixin.py +1433 -365
  18. brainstate/mixin_test.py +1017 -77
  19. brainstate/nn/__init__.py +137 -135
  20. brainstate/nn/_activations.py +1100 -808
  21. brainstate/nn/_activations_test.py +354 -331
  22. brainstate/nn/_collective_ops.py +633 -514
  23. brainstate/nn/_collective_ops_test.py +774 -43
  24. brainstate/nn/_common.py +226 -178
  25. brainstate/nn/_common_test.py +154 -0
  26. brainstate/nn/_conv.py +2010 -501
  27. brainstate/nn/_conv_test.py +849 -238
  28. brainstate/nn/_delay.py +575 -588
  29. brainstate/nn/_delay_test.py +243 -238
  30. brainstate/nn/_dropout.py +618 -426
  31. brainstate/nn/_dropout_test.py +477 -100
  32. brainstate/nn/_dynamics.py +1267 -1343
  33. brainstate/nn/_dynamics_test.py +67 -78
  34. brainstate/nn/_elementwise.py +1298 -1119
  35. brainstate/nn/_elementwise_test.py +830 -169
  36. brainstate/nn/_embedding.py +408 -58
  37. brainstate/nn/_embedding_test.py +156 -0
  38. brainstate/nn/{_fixedprob.py → _event_fixedprob.py} +233 -239
  39. brainstate/nn/{_fixedprob_test.py → _event_fixedprob_test.py} +115 -114
  40. brainstate/nn/{_linear_mv.py → _event_linear.py} +83 -83
  41. brainstate/nn/{_linear_mv_test.py → _event_linear_test.py} +121 -120
  42. brainstate/nn/_exp_euler.py +254 -92
  43. brainstate/nn/_exp_euler_test.py +377 -35
  44. brainstate/nn/_linear.py +744 -424
  45. brainstate/nn/_linear_test.py +475 -107
  46. brainstate/nn/_metrics.py +1070 -0
  47. brainstate/nn/_metrics_test.py +611 -0
  48. brainstate/nn/_module.py +384 -377
  49. brainstate/nn/_module_test.py +40 -40
  50. brainstate/nn/_normalizations.py +1334 -975
  51. brainstate/nn/_normalizations_test.py +699 -73
  52. brainstate/nn/_paddings.py +1020 -0
  53. brainstate/nn/_paddings_test.py +723 -0
  54. brainstate/nn/_poolings.py +2239 -1177
  55. brainstate/nn/_poolings_test.py +953 -217
  56. brainstate/nn/{_rate_rnns.py → _rnns.py} +946 -554
  57. brainstate/nn/_rnns_test.py +593 -0
  58. brainstate/nn/_utils.py +216 -89
  59. brainstate/nn/_utils_test.py +402 -0
  60. brainstate/{init/_random_inits.py → nn/init.py} +809 -553
  61. brainstate/{init/_random_inits_test.py → nn/init_test.py} +180 -149
  62. brainstate/random/__init__.py +270 -24
  63. brainstate/random/_rand_funs.py +3938 -3616
  64. brainstate/random/_rand_funs_test.py +640 -567
  65. brainstate/random/_rand_seed.py +675 -210
  66. brainstate/random/_rand_seed_test.py +48 -48
  67. brainstate/random/_rand_state.py +1617 -1409
  68. brainstate/random/_rand_state_test.py +551 -0
  69. brainstate/transform/__init__.py +59 -0
  70. brainstate/transform/_ad_checkpoint.py +176 -0
  71. brainstate/{compile → transform}/_ad_checkpoint_test.py +49 -49
  72. brainstate/{augment → transform}/_autograd.py +1025 -778
  73. brainstate/{augment → transform}/_autograd_test.py +1289 -1289
  74. brainstate/transform/_conditions.py +316 -0
  75. brainstate/{compile → transform}/_conditions_test.py +220 -220
  76. brainstate/{compile → transform}/_error_if.py +94 -92
  77. brainstate/{compile → transform}/_error_if_test.py +52 -52
  78. brainstate/transform/_eval_shape.py +145 -0
  79. brainstate/{augment → transform}/_eval_shape_test.py +38 -38
  80. brainstate/{compile → transform}/_jit.py +399 -346
  81. brainstate/{compile → transform}/_jit_test.py +143 -143
  82. brainstate/{compile → transform}/_loop_collect_return.py +675 -536
  83. brainstate/{compile → transform}/_loop_collect_return_test.py +58 -58
  84. brainstate/{compile → transform}/_loop_no_collection.py +283 -184
  85. brainstate/{compile → transform}/_loop_no_collection_test.py +50 -50
  86. brainstate/transform/_make_jaxpr.py +2016 -0
  87. brainstate/transform/_make_jaxpr_test.py +1510 -0
  88. brainstate/transform/_mapping.py +529 -0
  89. brainstate/transform/_mapping_test.py +194 -0
  90. brainstate/{compile → transform}/_progress_bar.py +255 -202
  91. brainstate/{augment → transform}/_random.py +171 -151
  92. brainstate/{compile → transform}/_unvmap.py +256 -159
  93. brainstate/transform/_util.py +286 -0
  94. brainstate/typing.py +837 -304
  95. brainstate/typing_test.py +780 -0
  96. brainstate/util/__init__.py +27 -50
  97. brainstate/util/_others.py +1025 -0
  98. brainstate/util/_others_test.py +962 -0
  99. brainstate/util/_pretty_pytree.py +1301 -0
  100. brainstate/util/_pretty_pytree_test.py +675 -0
  101. brainstate/util/{pretty_repr.py → _pretty_repr.py} +462 -328
  102. brainstate/util/_pretty_repr_test.py +696 -0
  103. brainstate/util/filter.py +945 -469
  104. brainstate/util/filter_test.py +912 -0
  105. brainstate/util/struct.py +910 -523
  106. brainstate/util/struct_test.py +602 -0
  107. {brainstate-0.1.10.dist-info → brainstate-0.2.1.dist-info}/METADATA +108 -91
  108. brainstate-0.2.1.dist-info/RECORD +111 -0
  109. {brainstate-0.1.10.dist-info → brainstate-0.2.1.dist-info}/licenses/LICENSE +202 -202
  110. brainstate/augment/__init__.py +0 -30
  111. brainstate/augment/_eval_shape.py +0 -99
  112. brainstate/augment/_mapping.py +0 -1060
  113. brainstate/augment/_mapping_test.py +0 -597
  114. brainstate/compile/__init__.py +0 -38
  115. brainstate/compile/_ad_checkpoint.py +0 -204
  116. brainstate/compile/_conditions.py +0 -256
  117. brainstate/compile/_make_jaxpr.py +0 -888
  118. brainstate/compile/_make_jaxpr_test.py +0 -156
  119. brainstate/compile/_util.py +0 -147
  120. brainstate/functional/__init__.py +0 -27
  121. brainstate/graph/_graph_node.py +0 -244
  122. brainstate/graph/_graph_node_test.py +0 -73
  123. brainstate/graph/_graph_operation_test.py +0 -563
  124. brainstate/init/__init__.py +0 -26
  125. brainstate/init/_base.py +0 -52
  126. brainstate/init/_generic.py +0 -244
  127. brainstate/init/_regular_inits.py +0 -105
  128. brainstate/init/_regular_inits_test.py +0 -50
  129. brainstate/nn/_inputs.py +0 -608
  130. brainstate/nn/_ltp.py +0 -28
  131. brainstate/nn/_neuron.py +0 -705
  132. brainstate/nn/_neuron_test.py +0 -161
  133. brainstate/nn/_others.py +0 -46
  134. brainstate/nn/_projection.py +0 -486
  135. brainstate/nn/_rate_rnns_test.py +0 -63
  136. brainstate/nn/_readout.py +0 -209
  137. brainstate/nn/_readout_test.py +0 -53
  138. brainstate/nn/_stp.py +0 -236
  139. brainstate/nn/_synapse.py +0 -505
  140. brainstate/nn/_synapse_test.py +0 -131
  141. brainstate/nn/_synaptic_projection.py +0 -423
  142. brainstate/nn/_synouts.py +0 -162
  143. brainstate/nn/_synouts_test.py +0 -57
  144. brainstate/nn/metrics.py +0 -388
  145. brainstate/optim/__init__.py +0 -38
  146. brainstate/optim/_base.py +0 -64
  147. brainstate/optim/_lr_scheduler.py +0 -448
  148. brainstate/optim/_lr_scheduler_test.py +0 -50
  149. brainstate/optim/_optax_optimizer.py +0 -152
  150. brainstate/optim/_optax_optimizer_test.py +0 -53
  151. brainstate/optim/_sgd_optimizer.py +0 -1104
  152. brainstate/random/_random_for_unit.py +0 -52
  153. brainstate/surrogate.py +0 -1957
  154. brainstate/transform.py +0 -23
  155. brainstate/util/caller.py +0 -98
  156. brainstate/util/others.py +0 -540
  157. brainstate/util/pretty_pytree.py +0 -945
  158. brainstate/util/pretty_pytree_test.py +0 -159
  159. brainstate/util/pretty_table.py +0 -2954
  160. brainstate/util/scaling.py +0 -258
  161. brainstate-0.1.10.dist-info/RECORD +0 -130
  162. {brainstate-0.1.10.dist-info → brainstate-0.2.1.dist-info}/WHEEL +0 -0
  163. {brainstate-0.1.10.dist-info → brainstate-0.2.1.dist-info}/top_level.txt +0 -0
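
The file list shows the 0.2.x reorganization: brainstate.augment and brainstate.compile are merged into a single brainstate.transform package, and brainstate/init moves under brainstate.nn. A hedged sketch of the import migration this implies for downstream code (the exact re-exports are an assumption based on the renamed paths above):

    # Old imports (0.1.10) -- from modules the list above shows were removed
    # from brainstate.compile import jit, scan
    # from brainstate.augment import vmap

    # New imports (0.2.1) -- assuming the moved modules are re-exported
    # from their new package, as the {compile, augment} -> transform renames suggest
    from brainstate.transform import jit, scan, vmap

Only the diff of brainstate/transform/_mapping.py is reproduced below.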
brainstate/transform/_mapping.py (new file)
@@ -0,0 +1,529 @@
+ # Copyright 2024 BrainX Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ import functools
+ from typing import (
+     Any,
+     TypeVar,
+     Callable,
+     Hashable,
+     Sequence,
+     Iterable,
+     Tuple,
+     Union,
+     Optional,
+     Dict
+ )
+
+ import jax
+
+ from brainstate._compatible_import import Device
+ from brainstate._state import catch_new_states
+ from brainstate._utils import set_module_as
+ from brainstate.typing import Missing, Filter
+ from brainstate.util import NestedDict
+ from ._loop_collect_return import scan
+ from ._make_jaxpr import StatefulMapping
+
+ __all__ = [
+     'vmap',
+     'pmap',
+     'map',
+     'vmap_new_states',
+ ]
+
+ F = TypeVar("F", bound=Callable)
+ AxisName = Hashable
+
+
+ @set_module_as('brainstate.transform')
+ def vmap(
+     fn: F | Missing = Missing(),
+     *,
+     # --- normal jax.vmap arguments --- #
+     in_axes: int | None | Sequence[Any] = 0,
+     out_axes: Any = 0,
+     axis_name: AxisName | None = None,
+     axis_size: int | None = None,
+     spmd_axis_name: AxisName | tuple[AxisName, ...] | None = None,
+     # --- brainstate specific arguments --- #
+     state_in_axes: Union[Dict[AxisName, Filter], Filter] = None,
+     state_out_axes: Union[Dict[AxisName, Filter], Filter] = None,
+ ) -> StatefulMapping | Callable[[F], StatefulMapping]:
+     """
+     Vectorizing map. Creates a function which maps ``fn`` over argument axes.
+
+     The transformation :func:`vmap` is designed to work with the ``pygraph`` structure
+     defined in the ``brainstate`` library. It is used to vectorize functions by
+     pushing the mapped axis down into primitive operations.
+
+     For more information, see `jax.vmap <https://jax.readthedocs.io/en/latest/_autosummary/jax.vmap.html>`__.
+
+     Example usage::
+
+         >>> import brainstate
+         >>> import jax.numpy as jnp
+
+         >>> class Model(brainstate.nn.Module):
+         ...     def __init__(self):
+         ...         super().__init__()
+         ...         self.a = brainstate.ShortTermState(brainstate.random.randn(5))
+         ...         self.b = brainstate.ShortTermState(brainstate.random.randn(5))
+         ...         self.c = brainstate.State(brainstate.random.randn(1))
+         ...
+         ...     def __call__(self, *args, **kwargs):
+         ...         self.c.value = self.a.value * self.b.value
+         ...         return self.c.value + 1.
+
+         >>> model = Model()
+
+         >>> r = brainstate.transform.vmap(
+         ...     model,
+         ...     state_in_axes=brainstate.ShortTermState,
+         ...     state_out_axes=brainstate.State,
+         ... )()
+
+     Parameters
+     ----------
+     fn : callable, optional
+         Function to be mapped over additional axes.
+     in_axes : int, None, or sequence, default 0
+         An integer, None, or sequence of values specifying which input
+         array axes to map over.
+     out_axes : int, None, or sequence, default 0
+         An integer, None, or (nested) standard Python container
+         (tuple/list/dict) thereof indicating where the mapped axis should appear
+         in the output.
+     axis_name : hashable, optional
+         A hashable Python object used to identify the mapped
+         axis so that parallel collectives can be applied.
+     axis_size : int, optional
+         An integer indicating the size of the axis to be
+         mapped. If not provided, the mapped axis size is inferred from arguments.
+     spmd_axis_name : hashable or tuple of hashable, optional
+         A hashable Python object or tuple of hashable
+         Python objects used to identify the mapped axis so that parallel collectives
+         can be applied. This is used to specify multiple axes to be mapped over
+         in a nested :func:`vmap` call. The length of the tuple must match the
+         number of nested :func:`vmap` calls. The first element of the tuple
+         corresponds to the outermost :func:`vmap` call, the second element to
+         the next outermost, and so on. If the tuple is not provided, the
+         ``axis_name`` is used for all nested :func:`vmap` calls.
+     state_in_axes : dict or Filter, optional
+         A filter (or a mapping from axis to filter) selecting the
+         :class:`State` objects to be mapped over in the inputs.
+     state_out_axes : dict or Filter, optional
+         A filter (or a mapping from axis to filter) selecting the
+         :class:`State` objects to be mapped over in the outputs.
+
+     Returns
+     -------
+     callable
+         Batched/vectorized version of ``fn`` with arguments that correspond to
+         those of ``fn``, but with extra array axes at positions indicated by
+         ``in_axes``, and a return value that corresponds to that of ``fn``, but
+         with extra array axes at positions indicated by ``out_axes``.
+
+     """
+
+     if isinstance(fn, Missing):
+         return functools.partial(
+             vmap,
+             in_axes=in_axes,
+             out_axes=out_axes,
+             state_in_axes=state_in_axes,
+             state_out_axes=state_out_axes,
+             axis_name=axis_name,
+             axis_size=axis_size,
+             spmd_axis_name=spmd_axis_name,
+         )  # type: ignore[return-value]
+
+     return StatefulMapping(
+         fn,
+         in_axes=in_axes,
+         out_axes=out_axes,
+         state_in_axes=state_in_axes,
+         state_out_axes=state_out_axes,
+         axis_name=axis_name,
+         axis_size=axis_size,
+         mapping_fn=functools.partial(jax.vmap, spmd_axis_name=spmd_axis_name)
+     )
+
+
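
A minimal usage sketch of the decorated form of ``vmap`` (editor's illustration, not part of the released file; it assumes ``StatefulMapping`` wraps a stateless function exactly as ``jax.vmap`` would, which is what the ``mapping_fn`` argument above suggests):

    import jax.numpy as jnp
    import brainstate

    @brainstate.transform.vmap  # map over axis 0 of each argument
    def scale(x):
        return 2.0 * x

    y = scale(jnp.ones((4, 3)))  # y.shape == (4, 3); each row handled independently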
+ @set_module_as('brainstate.transform')
+ def pmap(
+     fn: Callable[..., Any] | Missing = Missing(),
+     axis_name: Optional[AxisName] = None,
+     *,
+     in_axes: Any = 0,
+     out_axes: Any = 0,
+     static_broadcasted_argnums: int | Iterable[int] = (),
+     devices: Optional[Sequence[Device]] = None,  # noqa: F811
+     backend: Optional[str] = None,
+     axis_size: Optional[int] = None,
+     donate_argnums: int | Iterable[int] = (),
+     global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
+     # --- brainstate specific arguments --- #
+     state_in_axes: Union[Dict[AxisName, Filter], Filter] = None,
+     state_out_axes: Union[Dict[AxisName, Filter], Filter] = None,
+ ) -> Callable[[F], F] | F:
+     """
+     Parallel map with support for collective operations.
+
+     The purpose of :py:func:`pmap` is to express single-program multiple-data
+     (SPMD) programs. Applying :py:func:`pmap` to a function will compile the
+     function with XLA (similarly to :py:func:`jit`), then execute it in parallel
+     on XLA devices, such as multiple GPUs or multiple TPU cores. Semantically it
+     is comparable to :py:func:`vmap` because both transformations map a function
+     over array axes, but where :py:func:`vmap` vectorizes functions by pushing the
+     mapped axis down into primitive operations, :py:func:`pmap` instead replicates
+     the function and executes each replica on its own XLA device in parallel.
+
+     The mapped axis size must be less than or equal to the number of local XLA
+     devices available, as returned by :py:func:`jax.local_device_count` (unless
+     ``devices`` is specified, see below). For nested :py:func:`pmap` calls, the
+     product of the mapped axis sizes must be less than or equal to the number of
+     XLA devices.
+
+     For more information, see `jax.pmap <https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html>`__.
+
+     Args:
+         fn: Function to be mapped over argument axes. Its arguments and return
+             value should be arrays, scalars, or (nested) standard Python containers
+             (tuple/list/dict) thereof. Positional arguments indicated by
+             ``static_broadcasted_argnums`` can be anything at all, provided they are
+             hashable and have an equality operation defined.
+         axis_name: Optional, a hashable Python object used to identify the mapped
+             axis so that parallel collectives can be applied.
+         in_axes: A non-negative integer, None, or nested Python container thereof
+             that specifies which axes of positional arguments to map over. Arguments
+             passed as keywords are always mapped over their leading axis (i.e. axis
+             index 0). See :py:func:`vmap` for details.
+         out_axes: A non-negative integer, None, or nested Python container thereof
+             indicating where the mapped axis should appear in the output. All outputs
+             with a mapped axis must have a non-None ``out_axes`` specification
+             (see :py:func:`vmap`).
+         static_broadcasted_argnums: An int or collection of ints specifying which
+             positional arguments to treat as static (compile-time constant).
+             Operations that only depend on static arguments will be constant-folded.
+             Calling the pmapped function with different values for these constants
+             will trigger recompilation. If the pmapped function is called with fewer
+             positional arguments than indicated by ``static_broadcasted_argnums`` then
+             an error is raised. Each of the static arguments will be broadcasted to
+             all devices. Arguments that are not arrays or containers thereof must be
+             marked as static. Defaults to ().
+
+             Static arguments must be hashable, meaning both ``__hash__`` and
+             ``__eq__`` are implemented, and should be immutable.
+
+         devices: This is an experimental feature and the API is likely to change.
+             Optional, a sequence of Devices to map over. (Available devices can be
+             retrieved via ``jax.devices()``.) Must be given identically for each process
+             in multi-process settings (and will therefore include devices across
+             processes). If specified, the size of the mapped axis must be equal to
+             the number of devices in the sequence local to the given process. Nested
+             :py:func:`pmap` s with ``devices`` specified in either the inner or outer
+             :py:func:`pmap` are not yet supported.
+         backend: This is an experimental feature and the API is likely to change.
+             Optional, a string representing the XLA backend: 'cpu', 'gpu', or 'tpu'.
+         axis_size: Optional; the size of the mapped axis.
+         donate_argnums: Specify which positional argument buffers are "donated" to
+             the computation. It is safe to donate argument buffers if you no longer need
+             them once the computation has finished. In some cases XLA can make use of
+             donated buffers to reduce the amount of memory needed to perform a
+             computation, for example recycling one of your input buffers to store a
+             result. You should not reuse buffers that you donate to a computation; JAX
+             will raise an error if you try to.
+             Note that ``donate_argnums`` only works for positional arguments; keyword
+             arguments will not be donated.
+
+             For more details on buffer donation see the
+             `FAQ <https://jax.readthedocs.io/en/latest/faq.html#buffer-donation>`_.
+         global_arg_shapes: Optional; a tuple of tuples of integers representing the
+             shapes of the global arguments. These are arguments that are not replicated
+             across devices, but are broadcast to all devices. The tuple should have
+             the same length as the number of global arguments, and each inner tuple
+             should have the same length as the corresponding argument. The shapes of
+             the global arguments must be the same on all devices.
+         state_in_axes: Optional, a filter (or mapping from axis to filter) selecting
+             the :class:`State` objects to be mapped over in the inputs.
+         state_out_axes: Optional, a filter (or mapping from axis to filter) selecting
+             the :class:`State` objects to be mapped over in the outputs.
+
+     Returns:
+         A parallelized version of ``fn`` with arguments that correspond to those of
+         ``fn`` but with extra array axes at positions indicated by ``in_axes`` and
+         with output that has an additional leading array axis (with the same size).
+
+     """
+
+     if isinstance(fn, Missing):
+         return functools.partial(
+             pmap,
+             axis_name=axis_name,
+             in_axes=in_axes,
+             out_axes=out_axes,
+             static_broadcasted_argnums=static_broadcasted_argnums,
+             devices=devices,
+             backend=backend,
+             axis_size=axis_size,
+             donate_argnums=donate_argnums,
+             global_arg_shapes=global_arg_shapes,
+             state_in_axes=state_in_axes,
+             state_out_axes=state_out_axes,
+         )  # type: ignore[return-value]
+
+     return StatefulMapping(
+         fn,
+         in_axes=in_axes,
+         out_axes=out_axes,
+         state_in_axes=state_in_axes,
+         state_out_axes=state_out_axes,
+         axis_name=axis_name,
+         axis_size=axis_size,
+         mapping_fn=functools.partial(
+             jax.pmap,
+             static_broadcasted_argnums=static_broadcasted_argnums,
+             devices=devices,
+             backend=backend,
+             donate_argnums=donate_argnums,
+             global_arg_shapes=global_arg_shapes,
+         ),
+     )
+
+
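
A minimal, hedged sketch of the ``pmap`` wrapper above (an illustration under assumptions, not from the package): it follows the standard ``jax.pmap`` collective pattern and assumes at least one local XLA device.

    import jax
    import jax.numpy as jnp
    import brainstate

    n = jax.local_device_count()

    @brainstate.transform.pmap(axis_name='i')
    def normalize(x):
        # each device holds one element; psum reduces across all of them
        return x / jax.lax.psum(x, axis_name='i')

    out = normalize(jnp.arange(1.0, n + 1.0))  # leading axis of size n maps over devices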
+ def _batch_and_remainder(x, batch_size: int):
+     """Split the leading axis of ``x`` into full batches plus an optional remainder."""
+     leaves, tree_def = jax.tree.flatten(x)
+
+     scan_leaves = []
+     remainder_leaves = []
+
+     # All leaves must agree on the length of the leading axis.
+     length = None
+     for leaf in leaves:
+         if length is None:
+             length = leaf.shape[0]
+         if length != leaf.shape[0]:
+             raise ValueError(f"All inputs must have the same length. Got {length} and {leaf.shape[0]}.")
+
+     num_batches, num_remainder = divmod(length, batch_size)
+     for leaf in leaves:
+         total_batch_elems = num_batches * batch_size
+         # Reshape the evenly divisible prefix to (num_batches, batch_size, ...).
+         scan_leaves.append(leaf[:total_batch_elems].reshape(num_batches, batch_size, *leaf.shape[1:]))
+         if num_remainder:
+             remainder_leaves.append(leaf[total_batch_elems:])
+
+     scan_tree = tree_def.unflatten(scan_leaves)
+     if num_remainder:
+         remainder_tree = tree_def.unflatten(remainder_leaves)
+         return scan_tree, remainder_tree
+     else:
+         return scan_tree, None
+
+
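
To make the split concrete, a small sketch of what this module-private helper returns for a length-10 array with ``batch_size=3`` (the shapes follow directly from the ``divmod`` logic above):

    import jax.numpy as jnp

    x = jnp.arange(10)
    batches, remainder = _batch_and_remainder(x, batch_size=3)
    print(batches.shape)    # (3, 3): three full batches of three elements
    print(remainder.shape)  # (1,): the one leftover element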
+ @set_module_as('brainstate.transform')
+ def map(
+     f,
+     *xs,
+     batch_size: int | None = None,
+ ):
+     """
+     Map a function over leading array axes.
+
+     Like Python's builtin map, except inputs and outputs are in the form of
+     stacked arrays. Consider using the :func:`~jax.vmap` transform instead, unless you
+     need to apply a function element by element for reduced memory usage or
+     heterogeneous computation with other control flow primitives.
+
+     When ``xs`` is an array type, the semantics of :func:`~map` are given by this
+     Python implementation::
+
+         def map(f, *xs):
+             return np.stack([f(*x) for x in zip(*xs)])
+
+     Like :func:`~scan`, :func:`~map` is implemented in terms of JAX primitives so
+     many of the same advantages over a Python loop apply: ``xs`` may be an
+     arbitrary nested pytree type, and the mapped computation is compiled only
+     once.
+
+     If ``batch_size`` is provided, the computation is executed in batches of that size
+     and parallelized using :func:`~jax.vmap`. This can be used as either a more performant
+     version of ``map`` or as a memory-efficient version of ``vmap``. If the axis is not
+     divisible by the batch size, the remainder is processed in a separate ``vmap`` and
+     concatenated to the result.
+
+         >>> import jax.numpy as jnp
+         >>> x = jnp.ones((10, 3, 4))
+         >>> def f(x):
+         ...     print('inner shape:', x.shape)
+         ...     return x + 1
+         >>> y = map(f, x, batch_size=3)
+         inner shape: (3, 4)
+         inner shape: (3, 4)
+         >>> y.shape
+         (10, 3, 4)
+
+     In the example above, "inner shape" is printed twice: once while tracing the batched
+     computation and once while tracing the remainder computation.
+
+     Args:
+         f: a Python function to apply element-wise over the first axis or axes of
+             ``xs``.
+         xs: values over which to map along the leading axis.
+         batch_size: (optional) integer specifying the size of the batch for each step to execute
+             in parallel.
+
+     Returns:
+         Mapped values.
+     """
+     if batch_size is not None:
+         scan_xs, remainder_xs = _batch_and_remainder(xs, batch_size)
+         g = lambda _, x: ((), vmap(f)(*x))
+         _, scan_ys = scan(g, (), scan_xs)
+         if remainder_xs is None:
+             ys = jax.tree.map(lambda x: _flatten(x), scan_ys)
+         else:
+             remainder_ys = vmap(f)(*remainder_xs)
+             ys = jax.tree.map(
+                 lambda x, y: jax.lax.concatenate([_flatten(x), y], dimension=0),
+                 scan_ys,
+                 remainder_ys,
+             )
+     else:
+         g = lambda _, x: ((), f(*x))
+         _, ys = scan(g, (), xs)
+     return ys
+
+
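
For the non-batched path, a one-line sketch of the scan-based semantics (editor's illustration, not shipped code):

    import jax.numpy as jnp
    import brainstate

    xs = jnp.arange(6.0)
    ys = brainstate.transform.map(lambda x: x ** 2, xs)
    # ys == jnp.stack([x ** 2 for x in xs]), but traced and compiled once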
+ def _flatten(x):
+     # Merge the (num_batches, batch_size) leading axes back into one axis.
+     return x.reshape(-1, *x.shape[2:])
+
+
+ def _vmap_new_states_transform(
+     fun: Callable[..., Any],
+     *,
+     # -- normal jax.vmap arguments -- #
+     in_axes: int | None | Sequence[Any] = 0,
+     out_axes: Any = 0,
+     axis_name: AxisName | None = None,
+     axis_size: int | None = None,
+     spmd_axis_name: AxisName | tuple[AxisName, ...] | None = None,
+     # -- brainstate specific arguments -- #
+     state_tag: str | None = None,
+     state_to_exclude: Filter | None = None,
+     state_in_axes: Union[Dict[AxisName, Filter], Filter] = None,
+     state_out_axes: Union[Dict[AxisName, Filter], Filter] = None,
+ ):
+     # TODO: How about nested calls to ``vmap_new_states``?
+     if isinstance(axis_size, int) and axis_size <= 0:
+         raise ValueError(f"axis_size must be greater than 0, got {axis_size}.")
+
+     @vmap(
+         in_axes=in_axes,
+         out_axes=out_axes,
+         axis_name=axis_name,
+         axis_size=axis_size,
+         spmd_axis_name=spmd_axis_name,
+         state_in_axes=state_in_axes,
+         state_out_axes=state_out_axes,
+     )
+     def new_fun(args):
+         # call the function
+         with catch_new_states(state_tag=state_tag, state_to_exclude=state_to_exclude) as catcher:
+             out = fun(*args)
+
+         # get vmap state values
+         vmap_state_vals = catcher.get_state_values()
+
+         return out, vmap_state_vals
+
+     @functools.wraps(fun)
+     def vmapped_fn(*args):
+         # vmapping
+         with catch_new_states(state_to_exclude=state_to_exclude) as catcher:
+             outs, vmap_state_vals = new_fun(args)
+             vmap_states = catcher.get_states()
+
+         # restore vmapped state values
+         for st_val, st in zip(vmap_state_vals, vmap_states):
+             st.restore_value(st_val)
+             # ------------------------------------------------
+             # --- this is CRUCIAL to avoid jax tracing leakage
+             # ------------------------------------------------
+             st.decrease_stack_level()
+         return outs
+
+     return vmapped_fn
+
+
+ @set_module_as('brainstate.transform')
+ def vmap_new_states(
+     fun: Callable = Missing(),
+     *,
+     # -- normal jax.vmap arguments -- #
+     in_axes: int | None | Sequence[Any] = 0,
+     out_axes: Any = 0,
+     axis_name: AxisName | None = None,
+     axis_size: int | None = None,
+     spmd_axis_name: AxisName | tuple[AxisName, ...] | None = None,
+     # -- brainstate specific arguments -- #
+     state_tag: str | None = None,
+     state_to_exclude: Filter = None,
+     state_in_axes: Union[Dict[AxisName, Filter], Filter] = None,
+     state_out_axes: Union[Dict[AxisName, Filter], Filter] = None,
+ ):
+     """
+     Vectorize a function over new states created within it.
+
+     This function applies JAX's vmap transformation to states that are newly
+     created during the function's execution, allowing for more flexible
+     vectorization in the context of stateful computations.
+
+     Args:
+         fun (Callable, optional): The function to be vectorized. Defaults to Missing().
+         in_axes (int | None | Sequence[Any], optional): Specification of input axes for vectorization. Defaults to 0.
+         out_axes (Any, optional): Specification of output axes after vectorization. Defaults to 0.
+         axis_name (AxisName, optional): Name of the axis being vectorized over. Defaults to None.
+         axis_size (int, optional): Size of the axis being vectorized over. Defaults to None.
+         spmd_axis_name (AxisName | tuple[AxisName, ...], optional): Name(s) of SPMD axis/axes. Defaults to None.
+         state_tag (str, optional): A tag to identify specific states. Defaults to None.
+         state_to_exclude (Filter, optional): A filter selecting states to exclude from vectorization. Defaults to None.
+         state_in_axes (dict or Filter, optional): A filter (or mapping from axis to filter) selecting existing
+             :class:`State` objects to be mapped over in the inputs. Defaults to None.
+         state_out_axes (dict or Filter, optional): A filter (or mapping from axis to filter) selecting existing
+             :class:`State` objects to be mapped over in the outputs. Defaults to None.
+
+     Returns:
+         Callable: A vectorized version of the input function that handles new state creation.
+     """
+     if isinstance(fun, Missing):
+         return functools.partial(
+             _vmap_new_states_transform,
+             in_axes=in_axes,
+             out_axes=out_axes,
+             axis_name=axis_name,
+             axis_size=axis_size,
+             spmd_axis_name=spmd_axis_name,
+             state_tag=state_tag,
+             state_to_exclude=state_to_exclude,
+             state_in_axes=state_in_axes,
+             state_out_axes=state_out_axes,
+         )
+     else:
+         return _vmap_new_states_transform(
+             fun,
+             in_axes=in_axes,
+             out_axes=out_axes,
+             axis_name=axis_name,
+             axis_size=axis_size,
+             spmd_axis_name=spmd_axis_name,
+             state_tag=state_tag,
+             state_to_exclude=state_to_exclude,
+             state_in_axes=state_in_axes,
+             state_out_axes=state_out_axes,
+         )
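
Finally, a hedged sketch of ``vmap_new_states`` (an illustration under assumptions, not shipped code): states created inside the call are caught by ``catch_new_states`` and given a leading batch axis of ``axis_size``; the output shape shown is an inference from that mechanism.

    import brainstate

    @brainstate.transform.vmap_new_states(axis_size=4)
    def make_state():
        # created inside the transform, so it is caught and batched
        s = brainstate.ShortTermState(brainstate.random.randn(3))
        return s.value

    vals = make_state()  # expected shape (4, 3): one row per vmapped instance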