brainstate-0.1.8-py2.py3-none-any.whl → brainstate-0.1.9-py2.py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Files changed (133)
  1. brainstate/__init__.py +58 -51
  2. brainstate/_compatible_import.py +148 -148
  3. brainstate/_state.py +1605 -1663
  4. brainstate/_state_test.py +52 -52
  5. brainstate/_utils.py +47 -47
  6. brainstate/augment/__init__.py +30 -30
  7. brainstate/augment/_autograd.py +778 -778
  8. brainstate/augment/_autograd_test.py +1289 -1289
  9. brainstate/augment/_eval_shape.py +99 -99
  10. brainstate/augment/_eval_shape_test.py +38 -38
  11. brainstate/augment/_mapping.py +1060 -1060
  12. brainstate/augment/_mapping_test.py +597 -597
  13. brainstate/augment/_random.py +151 -151
  14. brainstate/compile/__init__.py +38 -38
  15. brainstate/compile/_ad_checkpoint.py +204 -204
  16. brainstate/compile/_ad_checkpoint_test.py +49 -49
  17. brainstate/compile/_conditions.py +256 -256
  18. brainstate/compile/_conditions_test.py +220 -220
  19. brainstate/compile/_error_if.py +92 -92
  20. brainstate/compile/_error_if_test.py +52 -52
  21. brainstate/compile/_jit.py +346 -346
  22. brainstate/compile/_jit_test.py +143 -143
  23. brainstate/compile/_loop_collect_return.py +536 -536
  24. brainstate/compile/_loop_collect_return_test.py +58 -58
  25. brainstate/compile/_loop_no_collection.py +184 -184
  26. brainstate/compile/_loop_no_collection_test.py +50 -50
  27. brainstate/compile/_make_jaxpr.py +888 -888
  28. brainstate/compile/_make_jaxpr_test.py +156 -156
  29. brainstate/compile/_progress_bar.py +202 -202
  30. brainstate/compile/_unvmap.py +159 -159
  31. brainstate/compile/_util.py +147 -147
  32. brainstate/environ.py +563 -563
  33. brainstate/environ_test.py +62 -62
  34. brainstate/functional/__init__.py +27 -26
  35. brainstate/graph/__init__.py +29 -29
  36. brainstate/graph/_graph_node.py +244 -244
  37. brainstate/graph/_graph_node_test.py +73 -73
  38. brainstate/graph/_graph_operation.py +1738 -1738
  39. brainstate/graph/_graph_operation_test.py +563 -563
  40. brainstate/init/__init__.py +26 -26
  41. brainstate/init/_base.py +52 -52
  42. brainstate/init/_generic.py +244 -244
  43. brainstate/init/_random_inits.py +553 -553
  44. brainstate/init/_random_inits_test.py +149 -149
  45. brainstate/init/_regular_inits.py +105 -105
  46. brainstate/init/_regular_inits_test.py +50 -50
  47. brainstate/mixin.py +365 -363
  48. brainstate/mixin_test.py +77 -73
  49. brainstate/nn/__init__.py +135 -131
  50. brainstate/{functional → nn}/_activations.py +808 -813
  51. brainstate/{functional → nn}/_activations_test.py +331 -331
  52. brainstate/nn/_collective_ops.py +514 -514
  53. brainstate/nn/_collective_ops_test.py +43 -43
  54. brainstate/nn/_common.py +178 -178
  55. brainstate/nn/_conv.py +501 -501
  56. brainstate/nn/_conv_test.py +238 -238
  57. brainstate/nn/_delay.py +509 -502
  58. brainstate/nn/_delay_test.py +238 -184
  59. brainstate/nn/_dropout.py +426 -426
  60. brainstate/nn/_dropout_test.py +100 -100
  61. brainstate/nn/_dynamics.py +1343 -1343
  62. brainstate/nn/_dynamics_test.py +78 -78
  63. brainstate/nn/_elementwise.py +1119 -1119
  64. brainstate/nn/_elementwise_test.py +169 -169
  65. brainstate/nn/_embedding.py +58 -58
  66. brainstate/nn/_exp_euler.py +92 -92
  67. brainstate/nn/_exp_euler_test.py +35 -35
  68. brainstate/nn/_fixedprob.py +239 -239
  69. brainstate/nn/_fixedprob_test.py +114 -114
  70. brainstate/nn/_inputs.py +608 -608
  71. brainstate/nn/_linear.py +424 -424
  72. brainstate/nn/_linear_mv.py +83 -83
  73. brainstate/nn/_linear_mv_test.py +120 -120
  74. brainstate/nn/_linear_test.py +107 -107
  75. brainstate/nn/_ltp.py +28 -28
  76. brainstate/nn/_module.py +377 -377
  77. brainstate/nn/_module_test.py +40 -40
  78. brainstate/nn/_neuron.py +705 -705
  79. brainstate/nn/_neuron_test.py +161 -161
  80. brainstate/nn/_normalizations.py +975 -918
  81. brainstate/nn/_normalizations_test.py +73 -73
  82. brainstate/{functional → nn}/_others.py +46 -46
  83. brainstate/nn/_poolings.py +1177 -1177
  84. brainstate/nn/_poolings_test.py +217 -217
  85. brainstate/nn/_projection.py +486 -486
  86. brainstate/nn/_rate_rnns.py +554 -554
  87. brainstate/nn/_rate_rnns_test.py +63 -63
  88. brainstate/nn/_readout.py +209 -209
  89. brainstate/nn/_readout_test.py +53 -53
  90. brainstate/nn/_stp.py +236 -236
  91. brainstate/nn/_synapse.py +505 -505
  92. brainstate/nn/_synapse_test.py +131 -131
  93. brainstate/nn/_synaptic_projection.py +423 -423
  94. brainstate/nn/_synouts.py +162 -162
  95. brainstate/nn/_synouts_test.py +57 -57
  96. brainstate/nn/_utils.py +89 -89
  97. brainstate/nn/metrics.py +388 -388
  98. brainstate/optim/__init__.py +38 -38
  99. brainstate/optim/_base.py +64 -64
  100. brainstate/optim/_lr_scheduler.py +448 -448
  101. brainstate/optim/_lr_scheduler_test.py +50 -50
  102. brainstate/optim/_optax_optimizer.py +152 -152
  103. brainstate/optim/_optax_optimizer_test.py +53 -53
  104. brainstate/optim/_sgd_optimizer.py +1104 -1104
  105. brainstate/random/__init__.py +24 -24
  106. brainstate/random/_rand_funs.py +3616 -3616
  107. brainstate/random/_rand_funs_test.py +567 -567
  108. brainstate/random/_rand_seed.py +210 -210
  109. brainstate/random/_rand_seed_test.py +48 -48
  110. brainstate/random/_rand_state.py +1409 -1409
  111. brainstate/random/_random_for_unit.py +52 -52
  112. brainstate/surrogate.py +1957 -1957
  113. brainstate/transform.py +23 -23
  114. brainstate/typing.py +304 -304
  115. brainstate/util/__init__.py +50 -50
  116. brainstate/util/caller.py +98 -98
  117. brainstate/util/error.py +55 -55
  118. brainstate/util/filter.py +469 -469
  119. brainstate/util/others.py +540 -540
  120. brainstate/util/pretty_pytree.py +945 -945
  121. brainstate/util/pretty_pytree_test.py +159 -159
  122. brainstate/util/pretty_repr.py +328 -328
  123. brainstate/util/pretty_table.py +2954 -2954
  124. brainstate/util/scaling.py +258 -258
  125. brainstate/util/struct.py +523 -523
  126. {brainstate-0.1.8.dist-info → brainstate-0.1.9.dist-info}/METADATA +91 -99
  127. brainstate-0.1.9.dist-info/RECORD +130 -0
  128. {brainstate-0.1.8.dist-info → brainstate-0.1.9.dist-info}/WHEEL +1 -1
  129. {brainstate-0.1.8.dist-info → brainstate-0.1.9.dist-info/licenses}/LICENSE +202 -202
  130. brainstate/functional/_normalization.py +0 -81
  131. brainstate/functional/_spikes.py +0 -204
  132. brainstate-0.1.8.dist-info/RECORD +0 -132
  133. {brainstate-0.1.8.dist-info → brainstate-0.1.9.dist-info}/top_level.txt +0 -0
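
Entries 50, 51, and 82 move modules from brainstate/functional into brainstate/nn, and entries 130 and 131 delete brainstate/functional/_normalization.py and _spikes.py outright. As an editorial illustration (not part of the diff), the sketch below shows the import change these renames imply for downstream code; `relu` is an assumed example name from the moved _activations module, and whether brainstate.functional still re-exports it in 0.1.9 is not visible in this listing:

    # Hypothetical migration implied by the {functional -> nn} renames above.
    # New path in 0.1.9:
    from brainstate.nn import relu  # assumed example name
    # Old path in 0.1.8 (may or may not still resolve in 0.1.9):
    # from brainstate.functional import relu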
brainstate/compile/_unvmap.py (entry 30 above, +159 -159). The registry marks every line of this file as removed and re-added, but the removed and re-added text are character-identical in this extraction, so the 159 lines are shown once below.
@@ -1,159 +1,159 @@
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import jax
import jax.core
import jax.interpreters.batching as batching
import jax.interpreters.mlir as mlir
import jax.numpy as jnp

from brainstate._compatible_import import Primitive
from brainstate._utils import set_module_as

__all__ = [
    "unvmap",
]


@set_module_as('brainstate.augment')
def unvmap(x, op: str = 'any'):
    if op == 'all':
        return unvmap_all(x)
    elif op == 'any':
        return unvmap_any(x)
    elif op == 'none':
        return _without_vmap(x)
    elif op == 'max':
        return unvmap_max(x)
    else:
        raise ValueError(f'Do not support type: {op}')


# unvmap_all

unvmap_all_p = Primitive("unvmap_all")


def unvmap_all(x):
    """As `jnp.all`, but ignores batch dimensions."""
    return unvmap_all_p.bind(x)


def _unvmap_all_impl(x):
    return jnp.all(x)


def _unvmap_all_abstract_eval(x):
    return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype)  # pyright: ignore


def _unvmap_all_batch(x, batch_axes):
    (x,) = x
    return unvmap_all(x), batching.not_mapped


unvmap_all_p.def_impl(_unvmap_all_impl)
unvmap_all_p.def_abstract_eval(_unvmap_all_abstract_eval)
batching.primitive_batchers[unvmap_all_p] = _unvmap_all_batch  # pyright: ignore
mlir.register_lowering(
    unvmap_all_p,
    mlir.lower_fun(_unvmap_all_impl, multiple_results=False),
)

# unvmap_any

unvmap_any_p = Primitive("unvmap_any")


def unvmap_any(x):
    """As `jnp.any`, but ignores batch dimensions."""
    return unvmap_any_p.bind(x)


def _unvmap_any_impl(x):
    return jnp.any(x)


def _unvmap_any_abstract_eval(x):
    return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype)  # pyright: ignore


def _unvmap_any_batch(x, batch_axes):
    (x,) = x
    return unvmap_any(x), batching.not_mapped


unvmap_any_p.def_impl(_unvmap_any_impl)
unvmap_any_p.def_abstract_eval(_unvmap_any_abstract_eval)
batching.primitive_batchers[unvmap_any_p] = _unvmap_any_batch  # pyright: ignore
mlir.register_lowering(
    unvmap_any_p,
    mlir.lower_fun(_unvmap_any_impl, multiple_results=False),
)

# unvmap_max

unvmap_max_p = Primitive("unvmap_max")


def unvmap_max(x):
    """As `jnp.max`, but ignores batch dimensions."""
    return unvmap_max_p.bind(x)


def _unvmap_max_impl(x):
    return jnp.max(x)


def _unvmap_max_abstract_eval(x):
    return jax.core.ShapedArray(shape=(), dtype=x.dtype)


def _unvmap_max_batch(x, batch_axes):
    (x,) = x
    return unvmap_max(x), batching.not_mapped


unvmap_max_p.def_impl(_unvmap_max_impl)
unvmap_max_p.def_abstract_eval(_unvmap_max_abstract_eval)
batching.primitive_batchers[unvmap_max_p] = _unvmap_max_batch  # pyright: ignore
mlir.register_lowering(
    unvmap_max_p,
    mlir.lower_fun(_unvmap_max_impl, multiple_results=False),
)


def _without_vmap(x):
    return _no_vmap_prim.bind(x)


def _without_vmap_imp(x):
    return x


def _without_vmap_abs(x):
    return x


def _without_vmap_batch(x, batch_axes):
    (x,) = x
    return _without_vmap(x), batching.not_mapped


_no_vmap_prim = Primitive('no_vmap')
_no_vmap_prim.def_impl(_without_vmap_imp)
_no_vmap_prim.def_abstract_eval(_without_vmap_abs)
batching.primitive_batchers[_no_vmap_prim] = _without_vmap_batch
mlir.register_lowering(_no_vmap_prim, mlir.lower_fun(_without_vmap_imp, multiple_results=False))
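
Editorial note (not part of the diff): _unvmap.py defines primitives whose batching rules swallow vmap's batch axis, so the reduction runs over the full batched array and the result is reported as unbatched. A minimal usage sketch, assuming brainstate 0.1.9 alongside jax, and assuming `unvmap` is re-exported as brainstate.augment.unvmap (the module name set by set_module_as above; the exact public import path is not shown in this diff):

    import jax
    import jax.numpy as jnp
    from brainstate.augment import unvmap  # assumed import path

    def row_has_negative(row):
        # Under jax.vmap, op='any' reduces over the batch axis as well,
        # producing one scalar answer for the whole batch.
        return unvmap(row < 0, op='any')

    xs = jnp.array([[1.0, 2.0], [-1.0, 3.0]])
    # vmap broadcasts the single unbatched answer back over the batch:
    print(jax.vmap(row_has_negative)(xs))  # [ True  True]

This is the usual pattern for gating batch-wide checks (for example, error conditions) inside vmapped code without getting one verdict per batch element.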
brainstate/compile/_util.py (entry 31 above, +147 -147). As with the previous file, every line is marked removed and re-added but the two sides are character-identical in this extraction, so the 147 lines are shown once below.
@@ -1,147 +1,147 @@
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from functools import wraps
from typing import Sequence, Tuple

from brainstate._state import StateTraceStack
from brainstate.typing import PyTree
from ._make_jaxpr import StatefulFunction


def write_back_state_values(
    state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree],
    write_state_vals: Sequence[PyTree],
):
    assert len(state_trace.states) == len(state_trace.been_writen) == len(read_state_vals) == len(write_state_vals)
    for st, write, val_r, val_w in zip(state_trace.states, state_trace.been_writen, read_state_vals, write_state_vals):
        if write:
            st.value = val_w
        else:
            st.restore_value(val_r)


def wrap_single_fun_in_multi_branches(
    stateful_fun: StatefulFunction,
    merged_state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree | None],
    return_states: bool = True
):
    state_ids_belong_to_this_fun = {id(st): st for st in stateful_fun.get_states()}

    @wraps(stateful_fun.fun)
    def wrapped_branch(write_state_vals, *operands):
        # "write_state_vals" should have the same length as "merged_state_trace.states"
        assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)

        # get all state values needed for this function, which is a subset of "write_state_vals"
        st_vals_for_this_fun = []
        for write, st, val_w, val_r in zip(merged_state_trace.been_writen,
                                           merged_state_trace.states,
                                           write_state_vals,
                                           read_state_vals):
            if id(st) in state_ids_belong_to_this_fun:
                st_vals_for_this_fun.append(val_w if write else val_r)

        # call this function
        new_state_vals, out = stateful_fun.jaxpr_call(st_vals_for_this_fun, *operands)
        assert len(new_state_vals) == len(st_vals_for_this_fun)

        if return_states:
            # get all written state values
            new_state_vals = {id(st): val for st, val in zip(stateful_fun.get_states(), new_state_vals)}
            write_state_vals = tuple([
                (new_state_vals[id(st)] if id(st) in state_ids_belong_to_this_fun else w_val)
                if write else None
                for write, st, w_val in zip(merged_state_trace.been_writen,
                                            merged_state_trace.states,
                                            write_state_vals)
            ])
            return write_state_vals, out
        return out

    return wrapped_branch


def wrap_single_fun_in_multi_branches_while_loop(
    stateful_fun: StatefulFunction,
    merged_state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree | None],
    return_states: bool = True
):
    state_ids_belong_to_this_fun = {id(st): st for st in stateful_fun.get_states()}

    @wraps(stateful_fun.fun)
    def wrapped_branch(init_val):
        write_state_vals, init_val = init_val
        # "write_state_vals" should have the same length as "merged_state_trace.states"
        assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)

        # get all state values needed for this function, which is a subset of "write_state_vals"
        st_vals_for_this_fun = []
        for write, st, val_w, val_r in zip(merged_state_trace.been_writen,
                                           merged_state_trace.states,
                                           write_state_vals,
                                           read_state_vals):
            if id(st) in state_ids_belong_to_this_fun:
                st_vals_for_this_fun.append(val_w if write else val_r)

        # call this function
        new_state_vals, out = stateful_fun.jaxpr_call(st_vals_for_this_fun, init_val)
        assert len(new_state_vals) == len(st_vals_for_this_fun)

        if return_states:
            # get all written state values
            new_state_vals = {id(st): val for st, val in zip(stateful_fun.get_states(), new_state_vals)}
            write_state_vals = tuple([
                (new_state_vals[id(st)] if id(st) in state_ids_belong_to_this_fun else w_val)
                if write else None
                for write, st, w_val in zip(merged_state_trace.been_writen,
                                            merged_state_trace.states,
                                            write_state_vals)
            ])
            return write_state_vals, out
        return out

    return wrapped_branch


def wrap_single_fun(
    stateful_fun: StatefulFunction,
    been_writen: Tuple[bool],
    read_state_vals: Tuple[PyTree | None],
):
    @wraps(stateful_fun.fun)
    def wrapped_fun(new_carry, inputs):
        writen_state_vals, carry = new_carry
        assert len(been_writen) == len(writen_state_vals) == len(read_state_vals)

        # collect all written and read states
        state_vals = [
            written_val if written else read_val
            for written, written_val, read_val in zip(been_writen, writen_state_vals, read_state_vals)
        ]

        # call the jaxpr
        state_vals, (carry, out) = stateful_fun.jaxpr_call(state_vals, carry, inputs)

        # only return the written states
        writen_state_vals = tuple([val if written else None for written, val in zip(been_writen, state_vals)])

        # return
        return (writen_state_vals, carry), out

    return wrapped_fun
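
Editorial note (not part of the diff): the helpers in _util.py thread state values through JAX control-flow primitives and then write results back. A minimal sketch of the write-back contract of `write_back_state_values`, with `ToyState` as an assumed stand-in for brainstate's real State class:

    class ToyState:
        def __init__(self, value):
            self.value = value

        def restore_value(self, value):
            # Stand-in for State.restore_value, which presumably resets the
            # value without recording it as a fresh write; this toy version
            # just assigns.
            self.value = value

    def write_back(states, been_writen, read_vals, write_vals):
        # Mirrors write_back_state_values above: written states take the
        # transform's output; read-only states are restored to their
        # pre-transform values, undoing any tracing side effects.
        assert len(states) == len(been_writen) == len(read_vals) == len(write_vals)
        for st, written, val_r, val_w in zip(states, been_writen, read_vals, write_vals):
            if written:
                st.value = val_w
            else:
                st.restore_value(val_r)

    a, b = ToyState(1), ToyState(2)
    write_back([a, b], been_writen=[True, False], read_vals=[None, 2], write_vals=[10, None])
    assert (a.value, b.value) == (10, 2)

The same written/read split drives the three wrappers above: only written slots are threaded through cond/while/scan carries (unwritten slots stay None), which keeps the traced carry as small as possible.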