brainstate 0.0.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. brainstate/__init__.py +45 -0
  2. brainstate/_module.py +1466 -0
  3. brainstate/_module_test.py +133 -0
  4. brainstate/_state.py +378 -0
  5. brainstate/_state_test.py +41 -0
  6. brainstate/_utils.py +21 -0
  7. brainstate/environ.py +375 -0
  8. brainstate/functional/__init__.py +25 -0
  9. brainstate/functional/_activations.py +754 -0
  10. brainstate/functional/_normalization.py +69 -0
  11. brainstate/functional/_spikes.py +90 -0
  12. brainstate/init/__init__.py +26 -0
  13. brainstate/init/_base.py +36 -0
  14. brainstate/init/_generic.py +175 -0
  15. brainstate/init/_random_inits.py +489 -0
  16. brainstate/init/_regular_inits.py +109 -0
  17. brainstate/math/__init__.py +21 -0
  18. brainstate/math/_einops.py +787 -0
  19. brainstate/math/_einops_parsing.py +169 -0
  20. brainstate/math/_einops_parsing_test.py +126 -0
  21. brainstate/math/_einops_test.py +346 -0
  22. brainstate/math/_misc.py +298 -0
  23. brainstate/math/_misc_test.py +58 -0
  24. brainstate/mixin.py +373 -0
  25. brainstate/mixin_test.py +73 -0
  26. brainstate/nn/__init__.py +68 -0
  27. brainstate/nn/_base.py +248 -0
  28. brainstate/nn/_connections.py +686 -0
  29. brainstate/nn/_dynamics.py +406 -0
  30. brainstate/nn/_elementwise.py +1437 -0
  31. brainstate/nn/_misc.py +132 -0
  32. brainstate/nn/_normalizations.py +389 -0
  33. brainstate/nn/_others.py +100 -0
  34. brainstate/nn/_poolings.py +1228 -0
  35. brainstate/nn/_poolings_test.py +231 -0
  36. brainstate/nn/_projection/__init__.py +32 -0
  37. brainstate/nn/_projection/_align_post.py +528 -0
  38. brainstate/nn/_projection/_align_pre.py +599 -0
  39. brainstate/nn/_projection/_delta.py +241 -0
  40. brainstate/nn/_projection/_utils.py +17 -0
  41. brainstate/nn/_projection/_vanilla.py +101 -0
  42. brainstate/nn/_rate_rnns.py +393 -0
  43. brainstate/nn/_readout.py +130 -0
  44. brainstate/nn/_synouts.py +166 -0
  45. brainstate/nn/functional/__init__.py +25 -0
  46. brainstate/nn/functional/_activations.py +754 -0
  47. brainstate/nn/functional/_normalization.py +69 -0
  48. brainstate/nn/functional/_spikes.py +90 -0
  49. brainstate/nn/init/__init__.py +26 -0
  50. brainstate/nn/init/_base.py +36 -0
  51. brainstate/nn/init/_generic.py +175 -0
  52. brainstate/nn/init/_random_inits.py +489 -0
  53. brainstate/nn/init/_regular_inits.py +109 -0
  54. brainstate/nn/surrogate.py +1740 -0
  55. brainstate/optim/__init__.py +23 -0
  56. brainstate/optim/_lr_scheduler.py +486 -0
  57. brainstate/optim/_lr_scheduler_test.py +36 -0
  58. brainstate/optim/_sgd_optimizer.py +1148 -0
  59. brainstate/random.py +5148 -0
  60. brainstate/random_test.py +576 -0
  61. brainstate/surrogate.py +1740 -0
  62. brainstate/transform/__init__.py +36 -0
  63. brainstate/transform/_autograd.py +585 -0
  64. brainstate/transform/_autograd_test.py +1183 -0
  65. brainstate/transform/_control.py +665 -0
  66. brainstate/transform/_controls_test.py +220 -0
  67. brainstate/transform/_jit.py +239 -0
  68. brainstate/transform/_jit_error.py +158 -0
  69. brainstate/transform/_jit_test.py +102 -0
  70. brainstate/transform/_make_jaxpr.py +573 -0
  71. brainstate/transform/_make_jaxpr_test.py +133 -0
  72. brainstate/transform/_progress_bar.py +113 -0
  73. brainstate/typing.py +69 -0
  74. brainstate/util.py +747 -0
  75. brainstate-0.0.1.dist-info/LICENSE +202 -0
  76. brainstate-0.0.1.dist-info/METADATA +101 -0
  77. brainstate-0.0.1.dist-info/RECORD +79 -0
  78. brainstate-0.0.1.dist-info/WHEEL +6 -0
  79. brainstate-0.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,787 @@
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ from __future__ import annotations
+
+ import functools
+ import itertools
+ from collections import OrderedDict
+ from typing import Set, Tuple, List, Dict, Union, Callable, Optional, cast
+
+ import jax
+ import jax.numpy as jnp
+ import numpy as np
+
+ from ._einops_parsing import ParsedExpression, _ellipsis, AnonymousAxis, EinopsError
+ from .._utils import set_module_as
+
+ __all__ = [
+     'einreduce',
+     'einrearrange',
+     'einrepeat',
+     'einshape',
+ ]
+
+ ReductionCallable = Callable[[jax.typing.ArrayLike, Tuple[int, ...]], jax.typing.ArrayLike]
+ Reduction = Union[str, ReductionCallable]
+
+ _reductions = ("min", "max", "sum", "mean", "prod", "any", "all")
+
+ # magic integers are required to stay within
+ # traceable subset of language
+ _unknown_axis_length = -999999
+ _expected_axis_length = -99999
+
+
+ def is_float_type(x: jax.typing.ArrayLike):
+     return x.dtype in ("float16", "float32", "float64", "float128", "bfloat16")
+
+
+ def add_axis(x: jax.typing.ArrayLike, new_position: int):
+     return jnp.expand_dims(jnp.asarray(x), new_position)
+
+
+ def add_axes(x: jax.typing.ArrayLike, n_axes, pos2len):
+     repeats = [1] * n_axes
+     for axis_position, axis_length in pos2len.items():
+         x = add_axis(x, axis_position)
+         repeats[axis_position] = axis_length
+     return jnp.tile(x, repeats)
+
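As a quick illustration of the helper above: `add_axes` inserts each requested axis one by one and then tiles, so every `pos2len` entry both creates a dimension and sets its repeat count. A minimal sketch (internal API; the import path is assumed from this wheel's layout):

```python
import jax.numpy as jnp
from brainstate.math._einops import add_axes

x = jnp.arange(3)                                # shape (3,)
y = add_axes(x, n_axes=3, pos2len={0: 2, 2: 4})  # expand at 0 and 2, tile by [2, 1, 4]
print(y.shape)                                   # (2, 3, 4)
```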
+ def _product(sequence: List[int]) -> int:
+     """Minimalistic product that works both with numbers and symbols. Supports empty lists."""
+     result = 1
+     for element in sequence:
+         result *= element
+     return result
+
+
+ def _reduce_axes(tensor, reduction_type: Reduction, reduced_axes: List[int]):
+     if callable(reduction_type):
+         # custom callable
+         return reduction_type(tensor, tuple(reduced_axes))
+     else:
+         # one of built-in operations
+         assert reduction_type in _reductions
+         if reduction_type == "mean":
+             if not is_float_type(tensor):
+                 raise NotImplementedError("reduce_mean is not available for non-floating tensors")
+         return __reduce(tensor, reduction_type, tuple(reduced_axes))
+
+
+ def __reduce(x: jax.typing.ArrayLike, operation: str, reduced_axes):
+     if operation == "min":
+         return x.min(axis=reduced_axes)
+     elif operation == "max":
+         return x.max(axis=reduced_axes)
+     elif operation == "sum":
+         return x.sum(axis=reduced_axes)
+     elif operation == "mean":
+         return x.mean(axis=reduced_axes)
+     elif operation == "prod":
+         return x.prod(axis=reduced_axes)
+     elif operation == "any":
+         return x.any(axis=reduced_axes)
+     elif operation == "all":
+         return x.all(axis=reduced_axes)
+     else:
+         raise NotImplementedError(f"Unknown reduction: {operation}")
+
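`_reduce_axes` dispatches string names through `__reduce` and forwards any callable unchanged, which is how custom reductions enter the pipeline. A sketch of both paths (internal API, same assumed import path as above):

```python
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from brainstate.math._einops import _reduce_axes

x = jnp.ones((4, 5, 6))

# built-in reduction, dispatched by name
print(_reduce_axes(x, 'sum', [1, 2]).shape)                                    # (4,)

# custom callable: any f(tensor, axes_tuple) -> tensor works
print(_reduce_axes(x, lambda t, axes: logsumexp(t, axis=axes), [1, 2]).shape)  # (4,)
```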
+ def _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes):
+     # 'collapses' neighboring axes if those participate in the result pattern in the same order
+     # TODO add support for added_axes
+     assert len(axes_reordering) + len(reduced_axes) == len(init_shapes)
+     # joining consecutive axes that will be reduced
+     # possibly we can skip this if all backends can optimize this (not sure)
+     reduced_axes = tuple(sorted(reduced_axes))
+     for i in range(len(reduced_axes) - 1)[::-1]:
+         if reduced_axes[i] + 1 == reduced_axes[i + 1]:
+             removed_axis = reduced_axes[i + 1]
+             removed_length = init_shapes[removed_axis]
+             init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1:]
+             init_shapes[removed_axis - 1] *= removed_length
+             reduced_axes = reduced_axes[: i + 1] + tuple(axis - 1 for axis in reduced_axes[i + 2:])
+
+     # removing axes that are moved together during reshape
+     def build_mapping():
+         init_to_final = {}
+         for axis in range(len(init_shapes)):
+             if axis in reduced_axes:
+                 init_to_final[axis] = None
+             else:
+                 after_reduction = sum(x is not None for x in init_to_final.values())
+                 init_to_final[axis] = list(axes_reordering).index(after_reduction)
+         return init_to_final
+
+     init_axis_to_final_axis = build_mapping()
+
+     for init_axis in range(len(init_shapes) - 1)[::-1]:
+         if init_axis_to_final_axis[init_axis] is None:
+             continue
+         if init_axis_to_final_axis[init_axis + 1] is None:
+             continue
+         if init_axis_to_final_axis[init_axis] + 1 == init_axis_to_final_axis[init_axis + 1]:
+             removed_axis = init_axis + 1
+             removed_length = init_shapes[removed_axis]
+             removed_axis_after_reduction = sum(x not in reduced_axes for x in range(removed_axis))
+
+             reduced_axes = tuple(axis if axis < removed_axis else axis - 1 for axis in reduced_axes)
+             init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1:]
+             init_shapes[removed_axis - 1] *= removed_length
+             old_reordering = axes_reordering
+             axes_reordering = []
+             for axis in old_reordering:
+                 if axis == removed_axis_after_reduction:
+                     pass
+                 elif axis < removed_axis_after_reduction:
+                     axes_reordering.append(axis)
+                 else:
+                     axes_reordering.append(axis - 1)
+             init_axis_to_final_axis = build_mapping()
+
+     return init_shapes, reduced_axes, axes_reordering, final_shapes
+
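The first rule above joins consecutive reduced axes into one, shrinking the reshape the backend has to perform. A hand-traced sketch of that rule (the arguments are illustrative, not taken from a real recipe):

```python
from brainstate.math._einops import _optimize_transformation

init_shapes, reduced_axes, axes_reordering, final_shapes = _optimize_transformation(
    [2, 3, 4, 5],   # elementary axis lengths
    (1, 2),         # axes 1 and 2 are reduced and adjacent,
    (0, 1),         # while the surviving axes keep their order
    [2, 5],
)
print(init_shapes, reduced_axes, axes_reordering)   # [2, 12, 5] (1,) (0, 1)
```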
+ CookedRecipe = Tuple[Optional[List[int]], Optional[List[int]], List[int], Dict[int, int], Optional[List[int]], int]
+
+ # Actual type is tuple[tuple[str, int], ...]
+ # However torch.jit.script does not "understand" the correct type,
+ # and torch_specific will use list version.
+ HashableAxesLengths = Tuple[Tuple[str, int], ...]
+ FakeHashableAxesLengths = List[Tuple[str, int]]
+
+
+ class TransformRecipe:
+     """
+     Recipe describes actual computation pathway.
+     Recipe can be applied to a tensor or variable.
+     """
+
+     # structure is non-mutable. In future, this can be non-mutable dataclass (python 3.7+)
+     # update: pytorch 2.0 torch.jit.script seems to have problems with dataclasses unless they were explicitly provided
+
+     def __init__(
+         self,
+         # list of sizes (or just sizes) for elementary axes as they appear in left expression.
+         # this is what (after computing unknown parts) will be a shape after first transposition.
+         # This does not include any ellipsis dimensions.
+         elementary_axes_lengths: List[int],
+         # if additional axes are provided, they should be set in prev array
+         # This shows mapping from name to position
+         axis_name2elementary_axis: Dict[str, int],
+         # each dimension in input can help to reconstruct length of one elementary axis
+         # or verify one of dimensions. Each element points to element of elementary_axes_lengths.
+         input_composition_known_unknown: List[Tuple[List[int], List[int]]],
+         # permutation applied to elementary axes, if ellipsis is absent
+         axes_permutation: List[int],
+         # permutation puts reduced axes in the end, we only need to know the first position.
+         first_reduced_axis: int,
+         # at which positions which of elementary axes should appear. Axis position -> axis index.
+         added_axes: Dict[int, int],
+         # ids of axes as they appear in result, again pointers to elementary_axes_lengths,
+         # only used to infer result dimensions
+         output_composite_axes: List[List[int]],
+     ):
+         self.elementary_axes_lengths: List[int] = elementary_axes_lengths
+         self.axis_name2elementary_axis: Dict[str, int] = axis_name2elementary_axis
+         self.input_composition_known_unknown: List[Tuple[List[int], List[int]]] = input_composition_known_unknown
+         self.axes_permutation: List[int] = axes_permutation
+
+         self.first_reduced_axis: int = first_reduced_axis
+         self.added_axes: Dict[int, int] = added_axes
+         self.output_composite_axes: List[List[int]] = output_composite_axes
+
+
+ def _reconstruct_from_shape_uncached(
+     self: TransformRecipe,
+     shape: List[int],
+     axes_dims: FakeHashableAxesLengths
+ ) -> CookedRecipe:
+     """
+     Reconstruct all actual parameters using shape.
+     Shape is a tuple that may contain integers, shape symbols (tf, theano) and UnknownSize (tf, previously mxnet)
+     known axes can be integers or symbols, but not Nones.
+     """
+     # magic number
+     need_init_reshape = False
+
+     # last axis is allocated for collapsed ellipsis
+     axes_lengths: List[int] = list(self.elementary_axes_lengths)
+     for axis, dim in axes_dims:
+         axes_lengths[self.axis_name2elementary_axis[axis]] = dim
+
+     for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composition_known_unknown):
+         length = shape[input_axis]
+         if len(known_axes) == 0 and len(unknown_axes) == 1:
+             # shortcut for the most common case
+             axes_lengths[unknown_axes[0]] = length
+             continue
+
+         known_product = 1
+         for axis in known_axes:
+             known_product *= axes_lengths[axis]
+
+         if len(unknown_axes) == 0:
+             if isinstance(length, int) and isinstance(known_product, int) and length != known_product:
+                 raise EinopsError(f"Shape mismatch, {length} != {known_product}")
+         else:
+             # assert len(unknown_axes) == 1, 'this is enforced when recipe is created, so commented out'
+             if isinstance(length, int) and isinstance(known_product, int) and length % known_product != 0:
+                 raise EinopsError(f"Shape mismatch, can't divide axis of length {length} in chunks of {known_product}")
+
+             unknown_axis = unknown_axes[0]
+             inferred_length: int = length // known_product
+             axes_lengths[unknown_axis] = inferred_length
+
+         if len(known_axes) + len(unknown_axes) != 1:
+             need_init_reshape = True
+
+     # at this point all axes_lengths are computed (either have values or variables, but not Nones)
+
+     # elementary axes are ordered as they appear in input, then all added axes
+     init_shapes: Optional[List[int]] = axes_lengths[: len(self.axes_permutation)] if need_init_reshape else None
+
+     need_final_reshape = False
+     final_shapes: List[int] = []
+     for grouping in self.output_composite_axes:
+         lengths = [axes_lengths[elementary_axis] for elementary_axis in grouping]
+         final_shapes.append(_product(lengths))
+         if len(lengths) != 1:
+             need_final_reshape = True
+
+     added_axes: Dict[int, int] = {
+         pos: axes_lengths[pos_in_elementary] for pos, pos_in_elementary in self.added_axes.items()
+     }
+
+     # this list can be empty
+     reduced_axes = list(range(self.first_reduced_axis, len(self.axes_permutation)))
+
+     n_axes_after_adding_axes = len(added_axes) + len(self.axes_permutation)
+
+     axes_reordering: Optional[List[int]] = self.axes_permutation
+     if self.axes_permutation == list(range(len(self.axes_permutation))):
+         axes_reordering = None
+
+     _final_shapes = final_shapes if need_final_reshape else None
+     return init_shapes, axes_reordering, reduced_axes, added_axes, _final_shapes, n_axes_after_adding_axes
+
+
+ _reconstruct_from_shape = functools.lru_cache(1024)(_reconstruct_from_shape_uncached)
+
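Caching works here because concrete shapes and `axes_lengths` arrive as hashable tuples; symbolic shapes raise `TypeError` and fall back to the uncached function (see `_apply_recipe` below). To make the cooked-recipe tuple concrete, a sketch using `_prepare_transformation_recipe` from further down in this file (internal API):

```python
from brainstate.math._einops import (
    _prepare_transformation_recipe, _reconstruct_from_shape_uncached)

recipe = _prepare_transformation_recipe(
    'b (h w) -> (b h) w', 'rearrange', axes_names=('h',), ndim=2)
cooked = _reconstruct_from_shape_uncached(recipe, [2, 12], [('h', 3)])
# (init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes)
print(cooked)   # ([2, 3, 4], None, [], {}, [6, 4], 3)
```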
+ def _apply_recipe(
+     recipe: TransformRecipe,
+     tensor: jax.typing.ArrayLike,
+     reduction_type: Reduction,
+     axes_lengths: HashableAxesLengths
+ ) -> jax.typing.ArrayLike:
+     # this method implements actual work for all backends for 3 operations
+     try:
+         init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added = (
+             _reconstruct_from_shape(recipe, jnp.shape(tensor), axes_lengths))
+     except TypeError:
+         # shape or one of passed axes lengths is not hashable (i.e. they are symbols)
+         _result = _reconstruct_from_shape_uncached(recipe, jnp.shape(tensor), axes_lengths)
+         (init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added) = _result
+     if init_shapes is not None:
+         tensor = jnp.reshape(tensor, init_shapes)
+     if axes_reordering is not None:
+         tensor = jnp.transpose(jnp.asarray(tensor), axes_reordering)
+     if len(reduced_axes) > 0:
+         tensor = _reduce_axes(tensor, reduction_type=reduction_type, reduced_axes=reduced_axes)
+     if len(added_axes) > 0:
+         tensor = add_axes(tensor, n_axes=n_axes_w_added, pos2len=added_axes)
+     if final_shapes is not None:
+         tensor = jnp.reshape(jnp.asarray(tensor), final_shapes)
+     return tensor
+
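So `_apply_recipe` is just reshape → transpose → reduce → add axes → reshape, with each step optional. For the recipe cooked above, only the two reshapes fire, and the public call should match the raw `jnp` pipeline (a sketch, assuming the wheel exposes `brainstate.math.einrearrange`):

```python
import jax.numpy as jnp
import brainstate

x = jnp.arange(24).reshape(2, 12)
y = brainstate.math.einrearrange(x, 'b (h w) -> (b h) w', h=3)
assert (y == x.reshape(2, 3, 4).reshape(6, 4)).all()
```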
+ def _apply_recipe_array_api(
+     xp,
+     recipe: TransformRecipe,
+     tensor: jax.typing.ArrayLike,
+     reduction_type: Reduction,
+     axes_lengths: HashableAxesLengths
+ ) -> jax.typing.ArrayLike:
+     # completely-inline implementation
+     init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added = _reconstruct_from_shape(
+         recipe, tensor.shape, axes_lengths
+     )
+     if init_shapes is not None:
+         tensor = xp.reshape(tensor, init_shapes)
+     if axes_reordering is not None:
+         tensor = xp.permute_dims(tensor, axes_reordering)
+     if len(reduced_axes) > 0:
+         if callable(reduction_type):
+             # custom callable
+             tensor = reduction_type(tensor, tuple(reduced_axes))
+         else:
+             # one of built-in operations
+             assert reduction_type in _reductions
+             tensor = getattr(xp, reduction_type)(tensor, axis=tuple(reduced_axes))
+     if len(added_axes) > 0:
+         # we use broadcasting
+         for axis_position, axis_length in added_axes.items():
+             tensor = xp.expand_dims(tensor, axis=axis_position)
+
+         final_shape = list(tensor.shape)
+         for axis_position, axis_length in added_axes.items():
+             final_shape[axis_position] = axis_length
+
+         tensor = xp.broadcast_to(tensor, final_shape)
+     if final_shapes is not None:
+         tensor = xp.reshape(tensor, final_shapes)
+     return tensor
+
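This variant drives any Array API namespace `xp` instead of `jnp`, and uses `expand_dims` plus `broadcast_to` rather than `tile` for added axes. A sketch with NumPy ≥ 2.0, which exposes Array API names such as `permute_dims` (the namespace choice is illustrative, not something this file prescribes):

```python
import numpy as np  # NumPy >= 2.0 provides permute_dims
from brainstate.math._einops import (
    _apply_recipe_array_api, _prepare_transformation_recipe)

recipe = _prepare_transformation_recipe('h w -> w h', 'rearrange', axes_names=(), ndim=2)
out = _apply_recipe_array_api(np, recipe, np.ones((3, 4)), 'rearrange', ())
print(out.shape)   # (4, 3)
```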
+ @functools.lru_cache(256)
+ def _prepare_transformation_recipe(
+     pattern: str,
+     operation: Reduction,
+     axes_names: Tuple[str, ...],
+     ndim: int,
+ ) -> TransformRecipe:
+     """Perform initial parsing of pattern and provided supplementary info
+     axes_lengths is a tuple of tuples (axis_name, axis_length)
+     """
+     left_str, rght_str = pattern.split("->")
+     left = ParsedExpression(left_str)
+     rght = ParsedExpression(rght_str)
+
+     # checking that axes are in agreement - new axes appear only in repeat, while disappear only in reduction
+     if not left.has_ellipsis and rght.has_ellipsis:
+         raise EinopsError("Ellipsis found in right side, but not left side of a pattern {}".format(pattern))
+     if left.has_ellipsis and left.has_ellipsis_parenthesized:
+         raise EinopsError("Ellipsis inside parenthesis in the left side is not allowed: {}".format(pattern))
+     if operation == "rearrange":
+         if left.has_non_unitary_anonymous_axes or rght.has_non_unitary_anonymous_axes:
+             raise EinopsError("Non-unitary anonymous axes are not supported in rearrange (exception is length 1)")
+         difference = set.symmetric_difference(left.identifiers, rght.identifiers)
+         if len(difference) > 0:
+             raise EinopsError("Identifiers only on one side of expression (should be on both): {}".format(difference))
+     elif operation == "repeat":
+         difference = set.difference(left.identifiers, rght.identifiers)
+         if len(difference) > 0:
+             raise EinopsError("Unexpected identifiers on the left side of repeat: {}".format(difference))
+         axes_without_size = set.difference(
+             {ax for ax in rght.identifiers if not isinstance(ax, AnonymousAxis)},
+             {*left.identifiers, *axes_names},
+         )
+         if len(axes_without_size) > 0:
+             raise EinopsError("Specify sizes for new axes in repeat: {}".format(axes_without_size))
+     elif operation in _reductions or callable(operation):
+         difference = set.difference(rght.identifiers, left.identifiers)
+         if len(difference) > 0:
+             raise EinopsError("Unexpected identifiers on the right side of reduce {}: {}".format(operation, difference))
+     else:
+         raise EinopsError("Unknown reduction {}. Expect one of {}.".format(operation, _reductions))
+
+     if left.has_ellipsis:
+         n_other_dims = len(left.composition) - 1
+         if ndim < n_other_dims:
+             raise EinopsError(f"Wrong shape: expected >={n_other_dims} dims. Received {ndim}-dim tensor.")
+         ellipsis_ndim = ndim - n_other_dims
+         ell_axes = [_ellipsis + str(i) for i in range(ellipsis_ndim)]
+         left_composition = []
+         for composite_axis in left.composition:
+             if composite_axis == _ellipsis:
+                 for axis in ell_axes:
+                     left_composition.append([axis])
+             else:
+                 left_composition.append(composite_axis)
+
+         rght_composition = []
+         for composite_axis in rght.composition:
+             if composite_axis == _ellipsis:
+                 for axis in ell_axes:
+                     rght_composition.append([axis])
+             else:
+                 group = []
+                 for axis in composite_axis:
+                     if axis == _ellipsis:
+                         group.extend(ell_axes)
+                     else:
+                         group.append(axis)
+                 rght_composition.append(group)
+
+         left.identifiers.update(ell_axes)
+         left.identifiers.remove(_ellipsis)
+         if rght.has_ellipsis:
+             rght.identifiers.update(ell_axes)
+             rght.identifiers.remove(_ellipsis)
+     else:
+         if ndim != len(left.composition):
+             raise EinopsError(f"Wrong shape: expected {len(left.composition)} dims. Received {ndim}-dim tensor.")
+         left_composition = left.composition
+         rght_composition = rght.composition
+
+     # parsing all dimensions to find out lengths
+     axis_name2known_length: Dict[Union[str, AnonymousAxis], int] = OrderedDict()
+     for composite_axis in left_composition:
+         for axis_name in composite_axis:
+             if isinstance(axis_name, AnonymousAxis):
+                 axis_name2known_length[axis_name] = axis_name.value
+             else:
+                 axis_name2known_length[axis_name] = _unknown_axis_length
+
+     # axis_ids_after_first_reshape = range(len(axis_name2known_length)) at this point
+
+     repeat_axes_names = []
+     for axis_name in rght.identifiers:
+         if axis_name not in axis_name2known_length:
+             if isinstance(axis_name, AnonymousAxis):
+                 axis_name2known_length[axis_name] = axis_name.value
+             else:
+                 axis_name2known_length[axis_name] = _unknown_axis_length
+             repeat_axes_names.append(axis_name)
+
+     axis_name2position = {name: position for position, name in enumerate(axis_name2known_length)}
+
+     # axes provided as kwargs
+     for elementary_axis in axes_names:
+         if not ParsedExpression.check_axis_name(elementary_axis):
+             raise EinopsError("Invalid name for an axis", elementary_axis)
+         if elementary_axis not in axis_name2known_length:
+             raise EinopsError("Axis {} is not used in transform".format(elementary_axis))
+         axis_name2known_length[elementary_axis] = _expected_axis_length
+
+     input_axes_known_unknown = []
+     # some shapes are inferred later - all information is prepared for faster inference
+     for i, composite_axis in enumerate(left_composition):
+         known: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] != _unknown_axis_length}
+         unknown: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] == _unknown_axis_length}
+         if len(unknown) > 1:
+             raise EinopsError("Could not infer sizes for {}".format(unknown))
+         assert len(unknown) + len(known) == len(composite_axis)
+         input_axes_known_unknown.append(
+             ([axis_name2position[axis] for axis in known], [axis_name2position[axis] for axis in unknown])
+         )
+
+     axis_position_after_reduction: Dict[str, int] = {}
+     for axis_name in itertools.chain(*left_composition):
+         if axis_name in rght.identifiers:
+             axis_position_after_reduction[axis_name] = len(axis_position_after_reduction)
+
+     result_axes_grouping: List[List[int]] = [
+         [axis_name2position[axis] for axis in composite_axis] for i, composite_axis in enumerate(rght_composition)
+     ]
+
+     ordered_axis_left = list(itertools.chain(*left_composition))
+     ordered_axis_rght = list(itertools.chain(*rght_composition))
+     reduced_axes = [axis for axis in ordered_axis_left if axis not in rght.identifiers]
+     order_after_transposition = [axis for axis in ordered_axis_rght if axis in left.identifiers] + reduced_axes
+     axes_permutation = [ordered_axis_left.index(axis) for axis in order_after_transposition]
+     added_axes = {
+         i: axis_name2position[axis_name]
+         for i, axis_name in enumerate(ordered_axis_rght)
+         if axis_name not in left.identifiers
+     }
+
+     first_reduced_axis = len(order_after_transposition) - len(reduced_axes)
+
+     return TransformRecipe(
+         elementary_axes_lengths=list(axis_name2known_length.values()),
+         axis_name2elementary_axis={axis: axis_name2position[axis] for axis in axes_names},
+         input_composition_known_unknown=input_axes_known_unknown,
+         axes_permutation=axes_permutation,
+         first_reduced_axis=first_reduced_axis,
+         added_axes=added_axes,
+         output_composite_axes=result_axes_grouping,
+     )
+
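All pattern validation happens in this function, before any tensor is touched, so malformed patterns fail early with an `EinopsError` naming the offending identifiers. A sketch:

```python
from brainstate.math._einops import _prepare_transformation_recipe
from brainstate.math._einops_parsing import EinopsError

try:
    _prepare_transformation_recipe('b h -> b h c', 'rearrange', axes_names=(), ndim=2)
except EinopsError as e:
    print(e)   # Identifiers only on one side of expression (should be on both): {'c'}
```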
+ def _prepare_recipes_for_all_dims(
+     pattern: str, operation: Reduction, axes_names: Tuple[str, ...]
+ ) -> Dict[int, TransformRecipe]:
+     """
+     Internal function, used in layers.
+     Layers create all recipes when they are initialized, so to keep recipes simple we pre-compute them for all dims.
+     """
+     left_str, rght_str = pattern.split("->")
+     left = ParsedExpression(left_str)
+     dims = [len(left.composition)]
+     if left.has_ellipsis:
+         dims = [len(left.composition) - 1 + ellipsis_dims for ellipsis_dims in range(8)]
+     return {ndim: _prepare_transformation_recipe(pattern, operation, axes_names, ndim=ndim) for ndim in dims}
+
+
+ @set_module_as('brainstate.math')
+ def einreduce(
+     tensor: Union[jax.typing.ArrayLike, List[jax.typing.ArrayLike]],
+     pattern: str,
+     reduction: Reduction,
+     **axes_lengths: int
+ ) -> jax.typing.ArrayLike:
+     """
+     ``einreduce`` provides a combination of reordering and reduction using reader-friendly notation.
+
+     Examples for reduce operation:
+
+     ```python
+     >>> x = np.random.randn(100, 32, 64)
+
+     # perform max-reduction on the first axis
+     >>> y = einreduce(x, 't b c -> b c', 'max')
+
+     # same as previous, but with clearer axes meaning
+     >>> y = einreduce(x, 'time batch channel -> batch channel', 'max')
+
+     >>> x = np.random.randn(10, 20, 30, 40)
+
+     # 2d max-pooling with kernel size = 2 * 2 for image processing
+     >>> y1 = einreduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h2=2, w2=2)
+
+     # if one wants to go back to the original height and width, the depth-to-space trick can be applied
+     >>> y2 = einrearrange(y1, 'b (c h2 w2) h1 w1 -> b c (h1 h2) (w1 w2)', h2=2, w2=2)
+     >>> assert einshape(x, 'b _ h w') == einshape(y2, 'b _ h w')
+
+     # Adaptive 2d max-pooling to 3 * 4 grid
+     >>> einreduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h1=3, w1=4).shape
+     (10, 20, 3, 4)
+
+     # Global average pooling
+     >>> einreduce(x, 'b c h w -> b c', 'mean').shape
+     (10, 20)
+
+     # Subtracting mean over batch for each channel
+     >>> y = x - einreduce(x, 'b c h w -> () c () ()', 'mean')
+
+     # Subtracting per-image mean for each channel
+     >>> y = x - einreduce(x, 'b c h w -> b c () ()', 'mean')
+     ```
+
+     Parameters:
+         tensor: tensor of any supported type (e.g. numpy.ndarray or jax.Array).
+             A list of tensors is also accepted; these should be of the same type and shape.
+         pattern: string, reduction pattern
+         reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod', 'any', 'all'), case-sensitive.
+             Alternatively, a callable f(tensor, reduced_axes) -> tensor can be provided.
+             This allows using various other reductions, e.g. np.max or jnp.var.
+         axes_lengths: any additional specifications for dimensions
+
+     Returns:
+         tensor of the same type as input
+     """
+     try:
+         hashable_axes_lengths = tuple(axes_lengths.items())
+         shape = jnp.shape(tensor)
+         recipe = _prepare_transformation_recipe(pattern, reduction, axes_names=tuple(axes_lengths), ndim=len(shape))
+         return _apply_recipe(recipe,
+                              cast(jax.typing.ArrayLike, tensor),
+                              reduction_type=reduction,
+                              axes_lengths=hashable_axes_lengths)
+     except EinopsError as e:
+         message = ' Error while processing {}-reduction pattern "{}".'.format(reduction, pattern)
+         if not isinstance(tensor, list):
+             message += "\n Input tensor shape: {}. ".format(shape)
+         else:
+             message += "\n Input is list. "
+         message += "Additional info: {}.".format(axes_lengths)
+         raise EinopsError(message + "\n {}".format(e))
+
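The docstring examples use NumPy, but the same calls work on JAX arrays, and the callable form covers reductions that have no string name. A sketch, assuming `brainstate.math` re-exports `einreduce`:

```python
import jax.numpy as jnp
import brainstate

x = jnp.ones((10, 20, 30, 40))

# global average pooling
print(brainstate.math.einreduce(x, 'b c h w -> b c', 'mean').shape)   # (10, 20)

# variance over the spatial axes via a callable
print(brainstate.math.einreduce(
    x, 'b c h w -> b c', lambda t, axes: jnp.var(t, axis=axes)
).shape)                                                              # (10, 20)
```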
+ @set_module_as('brainstate.math')
+ def einrearrange(
+     tensor: Union[jax.typing.ArrayLike, List[jax.typing.ArrayLike]],
+     pattern: str,
+     **axes_lengths
+ ) -> jax.typing.ArrayLike:
+     """
+     ``einrearrange`` is a reader-friendly smart element reordering for multidimensional tensors.
+     This operation includes the functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
+     stack, concatenate and other operations.
+
+     Examples for rearrange operation:
+
+     ```python
+     # suppose we have a set of 32 images in "h w c" format (height-width-channel)
+     >>> images = [np.random.randn(30, 40, 3) for _ in range(32)]
+
+     # stack along first (batch) axis, output is a single array
+     >>> einrearrange(images, 'b h w c -> b h w c').shape
+     (32, 30, 40, 3)
+
+     # concatenate images along height (vertical axis), 960 = 32 * 30
+     >>> einrearrange(images, 'b h w c -> (b h) w c').shape
+     (960, 40, 3)
+
+     # concatenate images along horizontal axis, 1280 = 32 * 40
+     >>> einrearrange(images, 'b h w c -> h (b w) c').shape
+     (30, 1280, 3)
+
+     # reorder axes to "b c h w" format for deep learning
+     >>> einrearrange(images, 'b h w c -> b c h w').shape
+     (32, 3, 30, 40)
+
+     # flatten each image into a vector, 3600 = 30 * 40 * 3
+     >>> einrearrange(images, 'b h w c -> b (c h w)').shape
+     (32, 3600)
+
+     # split each image into 4 smaller ones (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
+     >>> einrearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
+     (128, 15, 20, 3)
+
+     # space-to-depth operation
+     >>> einrearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
+     (32, 15, 20, 12)
+     ```
+
+     When composing axes, C-order enumeration is used (consecutive elements have different last axis).
+     Find more examples in the einops tutorial.
+
+     Parameters:
+         tensor: tensor of any supported type (e.g. numpy.ndarray or jax.Array).
+             A list of tensors is also accepted; these should be of the same type and shape.
+         pattern: string, rearrangement pattern
+         axes_lengths: any additional specifications for dimensions
+
+     Returns:
+         tensor of the same type as input. If possible, a view to the original tensor is returned.
+     """
+     return einreduce(tensor, pattern, reduction="rearrange", **axes_lengths)
+
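A list of equally-shaped arrays is treated as one extra leading axis, so stacking and concatenation fall out of the same pattern language. A JAX sketch:

```python
import jax.numpy as jnp
import brainstate

frames = [jnp.zeros((30, 40, 3)) for _ in range(8)]

print(brainstate.math.einrearrange(frames, 'b h w c -> b h w c').shape)    # (8, 30, 40, 3)
print(brainstate.math.einrearrange(frames, 'b h w c -> h (b w) c').shape)  # (30, 320, 3)
```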
+ @set_module_as('brainstate.math')
+ def einrepeat(
+     tensor: Union[jax.typing.ArrayLike, List[jax.typing.ArrayLike]],
+     pattern: str, **axes_lengths
+ ) -> jax.typing.ArrayLike:
+     """
+     ``einrepeat`` allows reordering elements and repeating them in arbitrary combinations.
+     This operation includes the functionality of repeat, tile, and broadcast functions.
+
+     Examples for repeat operation:
+
+     ```python
+     # a grayscale image (of shape height x width)
+     >>> image = np.random.randn(30, 40)
+
+     # change it to RGB format by repeating in each channel
+     >>> einrepeat(image, 'h w -> h w c', c=3).shape
+     (30, 40, 3)
+
+     # repeat image 2 times along height (vertical axis)
+     >>> einrepeat(image, 'h w -> (repeat h) w', repeat=2).shape
+     (60, 40)
+
+     # repeat image 2 times along height and 3 times along width
+     >>> einrepeat(image, 'h w -> (h2 h) (w3 w)', h2=2, w3=3).shape
+     (60, 120)
+
+     # convert each pixel to a small square 2x2, i.e. upsample the image by 2x
+     >>> einrepeat(image, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
+     (60, 80)
+
+     # pixelate the image by first downsampling by 2x, then upsampling
+     >>> downsampled = einreduce(image, '(h h2) (w w2) -> h w', 'mean', h2=2, w2=2)
+     >>> einrepeat(downsampled, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
+     (30, 40)
+     ```
+
+     When composing axes, C-order enumeration is used (consecutive elements have different last axis).
+     Find more examples in the einops tutorial.
+
+     Parameters:
+         tensor: tensor of any supported type (e.g. numpy.ndarray or jax.Array).
+             A list of tensors is also accepted; these should be of the same type and shape.
+         pattern: string, rearrangement pattern
+         axes_lengths: any additional specifications for dimensions
+
+     Returns:
+         Tensor of the same type as input. If possible, a view to the original tensor is returned.
+     """
+     return einreduce(tensor, pattern, reduction="repeat", **axes_lengths)
+
+
+ @set_module_as('brainstate.math')
+ def einshape(x, pattern: str) -> dict:
+     """
+     Parse a tensor shape to a dictionary mapping axes names to their lengths.
+
+     ```python
+     # Use underscore to skip the dimension in parsing.
+     >>> x = np.zeros([2, 3, 5, 7])
+     >>> einshape(x, 'batch _ h w')
+     {'batch': 2, 'h': 5, 'w': 7}
+
+     # `einshape` output can be used to specify axes_lengths for other operations:
+     >>> y = np.zeros([700])
+     >>> einrearrange(y, '(b c h w) -> b c h w', **einshape(x, 'b _ h w')).shape
+     (2, 10, 5, 7)
+     ```
+
+     For symbolic frameworks this may return symbols rather than integers.
+
+     Parameters:
+         x: tensor of any supported framework
+         pattern: str, space-separated names for axes; underscore means skip axis
+
+     Returns:
+         dict, maps axes names to their lengths
+     """
+     exp = ParsedExpression(pattern, allow_underscore=True)
+     shape = jnp.shape(x)
+     if exp.has_composed_axes():
+         raise RuntimeError(f"Can't parse shape with composite axes: {pattern} {shape}")
+     if len(shape) != len(exp.composition):
+         if exp.has_ellipsis:
+             if len(shape) < len(exp.composition) - 1:
+                 raise RuntimeError(f"Can't parse shape with this number of dimensions: {pattern} {shape}")
+         else:
+             raise RuntimeError(f"Can't parse shape with different number of dimensions: {pattern} {shape}")
+     if exp.has_ellipsis:
+         ellipsis_idx = exp.composition.index(_ellipsis)
+         composition = (
+             exp.composition[:ellipsis_idx]
+             + ["_"] * (len(shape) - len(exp.composition) + 1)
+             + exp.composition[ellipsis_idx + 1:]
+         )
+     else:
+         composition = exp.composition
+     result = {}
+     for (axis_name,), axis_length in zip(composition, shape):  # type: ignore
+         if axis_name != "_":
+             result[axis_name] = axis_length
+     return result
+
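`einshape` also accepts an ellipsis, which swallows the dimensions you don't want named, behaving like a run of underscores. A sketch:

```python
import jax.numpy as jnp
import brainstate

x = jnp.zeros((2, 3, 5, 7))
print(brainstate.math.einshape(x, 'b ... w'))   # {'b': 2, 'w': 7}
```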
+ # _enumerate_directions is not exposed in the public API
+ def _enumerate_directions(x):
+     """
+     For an n-dimensional tensor, returns tensors to enumerate each axis.
+
+     ```python
+     x = np.zeros([2, 3, 4])  # or any other tensor
+     i, j, k = _enumerate_directions(x)
+     result = i + 2 * j + 3 * k
+     ```
+
+     `result[i, j, k] = i + 2j + 3k`, and `result` also has the same shape as `x`.
+     Works very similarly to numpy.ogrid (open indexing grid).
+     """
+     shape = jnp.shape(x)
+     result = []
+     for axis_id, axis_length in enumerate(shape):
+         shape = [1] * len(shape)
+         shape[axis_id] = axis_length
+         result.append(jnp.reshape(jnp.arange(0, axis_length), shape))
+     return result