brainstate 0.1.7__py2.py3-none-any.whl → 0.1.9__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brainstate/__init__.py +58 -51
- brainstate/_compatible_import.py +148 -148
- brainstate/_state.py +1605 -1663
- brainstate/_state_test.py +52 -52
- brainstate/_utils.py +47 -47
- brainstate/augment/__init__.py +30 -30
- brainstate/augment/_autograd.py +778 -778
- brainstate/augment/_autograd_test.py +1289 -1289
- brainstate/augment/_eval_shape.py +99 -99
- brainstate/augment/_eval_shape_test.py +38 -38
- brainstate/augment/_mapping.py +1060 -1060
- brainstate/augment/_mapping_test.py +597 -597
- brainstate/augment/_random.py +151 -151
- brainstate/compile/__init__.py +38 -38
- brainstate/compile/_ad_checkpoint.py +204 -204
- brainstate/compile/_ad_checkpoint_test.py +49 -49
- brainstate/compile/_conditions.py +256 -256
- brainstate/compile/_conditions_test.py +220 -220
- brainstate/compile/_error_if.py +92 -92
- brainstate/compile/_error_if_test.py +52 -52
- brainstate/compile/_jit.py +346 -346
- brainstate/compile/_jit_test.py +143 -143
- brainstate/compile/_loop_collect_return.py +536 -536
- brainstate/compile/_loop_collect_return_test.py +58 -58
- brainstate/compile/_loop_no_collection.py +184 -184
- brainstate/compile/_loop_no_collection_test.py +50 -50
- brainstate/compile/_make_jaxpr.py +888 -888
- brainstate/compile/_make_jaxpr_test.py +156 -146
- brainstate/compile/_progress_bar.py +202 -202
- brainstate/compile/_unvmap.py +159 -159
- brainstate/compile/_util.py +147 -147
- brainstate/environ.py +563 -563
- brainstate/environ_test.py +62 -62
- brainstate/functional/__init__.py +27 -26
- brainstate/graph/__init__.py +29 -29
- brainstate/graph/_graph_node.py +244 -244
- brainstate/graph/_graph_node_test.py +73 -73
- brainstate/graph/_graph_operation.py +1738 -1738
- brainstate/graph/_graph_operation_test.py +563 -563
- brainstate/init/__init__.py +26 -26
- brainstate/init/_base.py +52 -52
- brainstate/init/_generic.py +244 -244
- brainstate/init/_random_inits.py +553 -553
- brainstate/init/_random_inits_test.py +149 -149
- brainstate/init/_regular_inits.py +105 -105
- brainstate/init/_regular_inits_test.py +50 -50
- brainstate/mixin.py +365 -363
- brainstate/mixin_test.py +77 -73
- brainstate/nn/__init__.py +135 -131
- brainstate/{functional → nn}/_activations.py +808 -813
- brainstate/{functional → nn}/_activations_test.py +331 -331
- brainstate/nn/_collective_ops.py +514 -514
- brainstate/nn/_collective_ops_test.py +43 -43
- brainstate/nn/_common.py +178 -178
- brainstate/nn/_conv.py +501 -501
- brainstate/nn/_conv_test.py +238 -238
- brainstate/nn/_delay.py +509 -470
- brainstate/nn/_delay_test.py +238 -0
- brainstate/nn/_dropout.py +426 -426
- brainstate/nn/_dropout_test.py +100 -100
- brainstate/nn/_dynamics.py +1343 -1361
- brainstate/nn/_dynamics_test.py +78 -78
- brainstate/nn/_elementwise.py +1119 -1120
- brainstate/nn/_elementwise_test.py +169 -169
- brainstate/nn/_embedding.py +58 -58
- brainstate/nn/_exp_euler.py +92 -92
- brainstate/nn/_exp_euler_test.py +35 -35
- brainstate/nn/_fixedprob.py +239 -239
- brainstate/nn/_fixedprob_test.py +114 -114
- brainstate/nn/_inputs.py +608 -608
- brainstate/nn/_linear.py +424 -424
- brainstate/nn/_linear_mv.py +83 -83
- brainstate/nn/_linear_mv_test.py +120 -120
- brainstate/nn/_linear_test.py +107 -107
- brainstate/nn/_ltp.py +28 -28
- brainstate/nn/_module.py +377 -377
- brainstate/nn/_module_test.py +40 -208
- brainstate/nn/_neuron.py +705 -705
- brainstate/nn/_neuron_test.py +161 -161
- brainstate/nn/_normalizations.py +975 -918
- brainstate/nn/_normalizations_test.py +73 -73
- brainstate/{functional → nn}/_others.py +46 -46
- brainstate/nn/_poolings.py +1177 -1177
- brainstate/nn/_poolings_test.py +217 -217
- brainstate/nn/_projection.py +486 -486
- brainstate/nn/_rate_rnns.py +554 -554
- brainstate/nn/_rate_rnns_test.py +63 -63
- brainstate/nn/_readout.py +209 -209
- brainstate/nn/_readout_test.py +53 -53
- brainstate/nn/_stp.py +236 -236
- brainstate/nn/_synapse.py +505 -505
- brainstate/nn/_synapse_test.py +131 -131
- brainstate/nn/_synaptic_projection.py +423 -423
- brainstate/nn/_synouts.py +162 -162
- brainstate/nn/_synouts_test.py +57 -57
- brainstate/nn/_utils.py +89 -89
- brainstate/nn/metrics.py +388 -388
- brainstate/optim/__init__.py +38 -38
- brainstate/optim/_base.py +64 -64
- brainstate/optim/_lr_scheduler.py +448 -448
- brainstate/optim/_lr_scheduler_test.py +50 -50
- brainstate/optim/_optax_optimizer.py +152 -152
- brainstate/optim/_optax_optimizer_test.py +53 -53
- brainstate/optim/_sgd_optimizer.py +1104 -1104
- brainstate/random/__init__.py +24 -24
- brainstate/random/_rand_funs.py +3616 -3616
- brainstate/random/_rand_funs_test.py +567 -567
- brainstate/random/_rand_seed.py +210 -210
- brainstate/random/_rand_seed_test.py +48 -48
- brainstate/random/_rand_state.py +1409 -1409
- brainstate/random/_random_for_unit.py +52 -52
- brainstate/surrogate.py +1957 -1957
- brainstate/transform.py +23 -23
- brainstate/typing.py +304 -304
- brainstate/util/__init__.py +50 -50
- brainstate/util/caller.py +98 -98
- brainstate/util/error.py +55 -55
- brainstate/util/filter.py +469 -469
- brainstate/util/others.py +540 -540
- brainstate/util/pretty_pytree.py +945 -945
- brainstate/util/pretty_pytree_test.py +159 -159
- brainstate/util/pretty_repr.py +328 -328
- brainstate/util/pretty_table.py +2954 -2954
- brainstate/util/scaling.py +258 -258
- brainstate/util/struct.py +523 -523
- {brainstate-0.1.7.dist-info → brainstate-0.1.9.dist-info}/METADATA +91 -99
- brainstate-0.1.9.dist-info/RECORD +130 -0
- {brainstate-0.1.7.dist-info → brainstate-0.1.9.dist-info}/WHEEL +1 -1
- {brainstate-0.1.7.dist-info → brainstate-0.1.9.dist-info/licenses}/LICENSE +202 -202
- brainstate/functional/_normalization.py +0 -81
- brainstate/functional/_spikes.py +0 -204
- brainstate-0.1.7.dist-info/RECORD +0 -131
- {brainstate-0.1.7.dist-info → brainstate-0.1.9.dist-info}/top_level.txt +0 -0
brainstate/compile/_unvmap.py
CHANGED
@@ -1,159 +1,159 @@
|
|
1
|
-
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
|
2
|
-
#
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
-
# you may not use this file except in compliance with the License.
|
5
|
-
# You may obtain a copy of the License at
|
6
|
-
#
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
-
#
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
-
# See the License for the specific language governing permissions and
|
13
|
-
# limitations under the License.
|
14
|
-
# ==============================================================================
|
15
|
-
|
16
|
-
import jax
|
17
|
-
import jax.core
|
18
|
-
import jax.interpreters.batching as batching
|
19
|
-
import jax.interpreters.mlir as mlir
|
20
|
-
import jax.numpy as jnp
|
21
|
-
|
22
|
-
from brainstate._compatible_import import Primitive
|
23
|
-
from brainstate._utils import set_module_as
|
24
|
-
|
25
|
-
__all__ = [
|
26
|
-
"unvmap",
|
27
|
-
]
|
28
|
-
|
29
|
-
|
30
|
-
@set_module_as('brainstate.augment')
|
31
|
-
def unvmap(x, op: str = 'any'):
|
32
|
-
if op == 'all':
|
33
|
-
return unvmap_all(x)
|
34
|
-
elif op == 'any':
|
35
|
-
return unvmap_any(x)
|
36
|
-
elif op == 'none':
|
37
|
-
return _without_vmap(x)
|
38
|
-
elif op == 'max':
|
39
|
-
return unvmap_max(x)
|
40
|
-
else:
|
41
|
-
raise ValueError(f'Do not support type: {op}')
|
42
|
-
|
43
|
-
|
44
|
-
# unvmap_all
|
45
|
-
|
46
|
-
unvmap_all_p = Primitive("unvmap_all")
|
47
|
-
|
48
|
-
|
49
|
-
def unvmap_all(x):
|
50
|
-
"""As `jnp.all`, but ignores batch dimensions."""
|
51
|
-
return unvmap_all_p.bind(x)
|
52
|
-
|
53
|
-
|
54
|
-
def _unvmap_all_impl(x):
|
55
|
-
return jnp.all(x)
|
56
|
-
|
57
|
-
|
58
|
-
def _unvmap_all_abstract_eval(x):
|
59
|
-
return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype) # pyright: ignore
|
60
|
-
|
61
|
-
|
62
|
-
def _unvmap_all_batch(x, batch_axes):
|
63
|
-
(x,) = x
|
64
|
-
return unvmap_all(x), batching.not_mapped
|
65
|
-
|
66
|
-
|
67
|
-
unvmap_all_p.def_impl(_unvmap_all_impl)
|
68
|
-
unvmap_all_p.def_abstract_eval(_unvmap_all_abstract_eval)
|
69
|
-
batching.primitive_batchers[unvmap_all_p] = _unvmap_all_batch # pyright: ignore
|
70
|
-
mlir.register_lowering(
|
71
|
-
unvmap_all_p,
|
72
|
-
mlir.lower_fun(_unvmap_all_impl, multiple_results=False),
|
73
|
-
)
|
74
|
-
|
75
|
-
# unvmap_any
|
76
|
-
|
77
|
-
unvmap_any_p = Primitive("unvmap_any")
|
78
|
-
|
79
|
-
|
80
|
-
def unvmap_any(x):
|
81
|
-
"""As `jnp.any`, but ignores batch dimensions."""
|
82
|
-
return unvmap_any_p.bind(x)
|
83
|
-
|
84
|
-
|
85
|
-
def _unvmap_any_impl(x):
|
86
|
-
return jnp.any(x)
|
87
|
-
|
88
|
-
|
89
|
-
def _unvmap_any_abstract_eval(x):
|
90
|
-
return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype) # pyright: ignore
|
91
|
-
|
92
|
-
|
93
|
-
def _unvmap_any_batch(x, batch_axes):
|
94
|
-
(x,) = x
|
95
|
-
return unvmap_any(x), batching.not_mapped
|
96
|
-
|
97
|
-
|
98
|
-
unvmap_any_p.def_impl(_unvmap_any_impl)
|
99
|
-
unvmap_any_p.def_abstract_eval(_unvmap_any_abstract_eval)
|
100
|
-
batching.primitive_batchers[unvmap_any_p] = _unvmap_any_batch # pyright: ignore
|
101
|
-
mlir.register_lowering(
|
102
|
-
unvmap_any_p,
|
103
|
-
mlir.lower_fun(_unvmap_any_impl, multiple_results=False),
|
104
|
-
)
|
105
|
-
|
106
|
-
# unvmap_max
|
107
|
-
|
108
|
-
unvmap_max_p = Primitive("unvmap_max")
|
109
|
-
|
110
|
-
|
111
|
-
def unvmap_max(x):
|
112
|
-
"""As `jnp.max`, but ignores batch dimensions."""
|
113
|
-
return unvmap_max_p.bind(x)
|
114
|
-
|
115
|
-
|
116
|
-
def _unvmap_max_impl(x):
|
117
|
-
return jnp.max(x)
|
118
|
-
|
119
|
-
|
120
|
-
def _unvmap_max_abstract_eval(x):
|
121
|
-
return jax.core.ShapedArray(shape=(), dtype=x.dtype)
|
122
|
-
|
123
|
-
|
124
|
-
def _unvmap_max_batch(x, batch_axes):
|
125
|
-
(x,) = x
|
126
|
-
return unvmap_max(x), batching.not_mapped
|
127
|
-
|
128
|
-
|
129
|
-
unvmap_max_p.def_impl(_unvmap_max_impl)
|
130
|
-
unvmap_max_p.def_abstract_eval(_unvmap_max_abstract_eval)
|
131
|
-
batching.primitive_batchers[unvmap_max_p] = _unvmap_max_batch # pyright: ignore
|
132
|
-
mlir.register_lowering(
|
133
|
-
unvmap_max_p,
|
134
|
-
mlir.lower_fun(_unvmap_max_impl, multiple_results=False),
|
135
|
-
)
|
136
|
-
|
137
|
-
|
138
|
-
def _without_vmap(x):
|
139
|
-
return _no_vmap_prim.bind(x)
|
140
|
-
|
141
|
-
|
142
|
-
def _without_vmap_imp(x):
|
143
|
-
return x
|
144
|
-
|
145
|
-
|
146
|
-
def _without_vmap_abs(x):
|
147
|
-
return x
|
148
|
-
|
149
|
-
|
150
|
-
def _without_vmap_batch(x, batch_axes):
|
151
|
-
(x,) = x
|
152
|
-
return _without_vmap(x), batching.not_mapped
|
153
|
-
|
154
|
-
|
155
|
-
_no_vmap_prim = Primitive('no_vmap')
|
156
|
-
_no_vmap_prim.def_impl(_without_vmap_imp)
|
157
|
-
_no_vmap_prim.def_abstract_eval(_without_vmap_abs)
|
158
|
-
batching.primitive_batchers[_no_vmap_prim] = _without_vmap_batch
|
159
|
-
mlir.register_lowering(_no_vmap_prim, mlir.lower_fun(_without_vmap_imp, multiple_results=False))
|
1
|
+
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
|
16
|
+
import jax
|
17
|
+
import jax.core
|
18
|
+
import jax.interpreters.batching as batching
|
19
|
+
import jax.interpreters.mlir as mlir
|
20
|
+
import jax.numpy as jnp
|
21
|
+
|
22
|
+
from brainstate._compatible_import import Primitive
|
23
|
+
from brainstate._utils import set_module_as
|
24
|
+
|
25
|
+
__all__ = [
|
26
|
+
"unvmap",
|
27
|
+
]
|
28
|
+
|
29
|
+
|
30
|
+
@set_module_as('brainstate.augment')
def unvmap(x, op: str = 'any'):
    """Reduce ``x`` with an operation that ignores ``vmap`` batch dimensions.

    Parameters
    ----------
    x : array-like
        The value to reduce.
    op : str
        One of ``'all'``, ``'any'``, ``'max'`` (batch-ignoring reductions),
        or ``'none'`` (pass ``x`` through while dropping the batch axis).

    Returns
    -------
    The reduced (or passed-through) value.

    Raises
    ------
    ValueError
        If ``op`` is not one of the supported names.
    """
    # Dispatch table keyed by the reduction name.
    reducers = {
        'all': unvmap_all,
        'any': unvmap_any,
        'none': _without_vmap,
        'max': unvmap_max,
    }
    if op not in reducers:
        raise ValueError(f'Do not support type: {op}')
    return reducers[op](x)
|
42
|
+
|
43
|
+
|
44
|
+
# --------------------------------------------------------------------------
# unvmap_all: scalar ``all`` reduction that is transparent to ``vmap``
# --------------------------------------------------------------------------


def _unvmap_all_impl(x):
    # Concrete/lowered evaluation: a plain reduction over every axis.
    return jnp.all(x)


def _unvmap_all_abstract_eval(x):
    # The primitive always yields a scalar boolean, whatever shape ``x`` has.
    return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype)  # pyright: ignore


def _unvmap_all_batch(x, batch_axes):
    # Batching rule: swallow the batch axis by re-binding on the whole
    # array and declaring the output unbatched.
    (x,) = x
    return unvmap_all(x), batching.not_mapped


def unvmap_all(x):
    """As `jnp.all`, but ignores batch dimensions."""
    return unvmap_all_p.bind(x)


unvmap_all_p = Primitive("unvmap_all")
unvmap_all_p.def_impl(_unvmap_all_impl)
unvmap_all_p.def_abstract_eval(_unvmap_all_abstract_eval)
batching.primitive_batchers[unvmap_all_p] = _unvmap_all_batch  # pyright: ignore
mlir.register_lowering(unvmap_all_p, mlir.lower_fun(_unvmap_all_impl, multiple_results=False))
|
74
|
+
|
75
|
+
# --------------------------------------------------------------------------
# unvmap_any: scalar ``any`` reduction that is transparent to ``vmap``
# --------------------------------------------------------------------------


def _unvmap_any_impl(x):
    # Concrete/lowered evaluation: a plain reduction over every axis.
    return jnp.any(x)


def _unvmap_any_abstract_eval(x):
    # The primitive always yields a scalar boolean, whatever shape ``x`` has.
    return jax.core.ShapedArray(shape=(), dtype=jax.numpy.bool_.dtype)  # pyright: ignore


def _unvmap_any_batch(x, batch_axes):
    # Batching rule: swallow the batch axis by re-binding on the whole
    # array and declaring the output unbatched.
    (x,) = x
    return unvmap_any(x), batching.not_mapped


def unvmap_any(x):
    """As `jnp.any`, but ignores batch dimensions."""
    return unvmap_any_p.bind(x)


unvmap_any_p = Primitive("unvmap_any")
unvmap_any_p.def_impl(_unvmap_any_impl)
unvmap_any_p.def_abstract_eval(_unvmap_any_abstract_eval)
batching.primitive_batchers[unvmap_any_p] = _unvmap_any_batch  # pyright: ignore
mlir.register_lowering(unvmap_any_p, mlir.lower_fun(_unvmap_any_impl, multiple_results=False))
|
105
|
+
|
106
|
+
# --------------------------------------------------------------------------
# unvmap_max: scalar ``max`` reduction that is transparent to ``vmap``
# --------------------------------------------------------------------------


def _unvmap_max_impl(x):
    # Concrete/lowered evaluation: a plain reduction over every axis.
    return jnp.max(x)


def _unvmap_max_abstract_eval(x):
    # The primitive yields a scalar of the same dtype as the input.
    return jax.core.ShapedArray(shape=(), dtype=x.dtype)


def _unvmap_max_batch(x, batch_axes):
    # Batching rule: swallow the batch axis by re-binding on the whole
    # array and declaring the output unbatched.
    (x,) = x
    return unvmap_max(x), batching.not_mapped


def unvmap_max(x):
    """As `jnp.max`, but ignores batch dimensions."""
    return unvmap_max_p.bind(x)


unvmap_max_p = Primitive("unvmap_max")
unvmap_max_p.def_impl(_unvmap_max_impl)
unvmap_max_p.def_abstract_eval(_unvmap_max_abstract_eval)
batching.primitive_batchers[unvmap_max_p] = _unvmap_max_batch  # pyright: ignore
mlir.register_lowering(unvmap_max_p, mlir.lower_fun(_unvmap_max_impl, multiple_results=False))
|
136
|
+
|
137
|
+
|
138
|
+
# Identity primitive whose only purpose is to drop the ``vmap`` batch axis.


def _without_vmap_imp(x):
    # Runtime behaviour: a pure pass-through.
    return x


def _without_vmap_abs(x):
    # Abstract evaluation: the output aval is exactly the input aval.
    return x


def _without_vmap_batch(x, batch_axes):
    # Batching rule: re-bind on the full array and mark the result unbatched.
    (x,) = x
    return _without_vmap(x), batching.not_mapped


def _without_vmap(x):
    return _no_vmap_prim.bind(x)


_no_vmap_prim = Primitive('no_vmap')
_no_vmap_prim.def_impl(_without_vmap_imp)
_no_vmap_prim.def_abstract_eval(_without_vmap_abs)
batching.primitive_batchers[_no_vmap_prim] = _without_vmap_batch
mlir.register_lowering(_no_vmap_prim, mlir.lower_fun(_without_vmap_imp, multiple_results=False))
|
brainstate/compile/_util.py
CHANGED
@@ -1,147 +1,147 @@
|
|
1
|
-
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
|
2
|
-
#
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
-
# you may not use this file except in compliance with the License.
|
5
|
-
# You may obtain a copy of the License at
|
6
|
-
#
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
-
#
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
-
# See the License for the specific language governing permissions and
|
13
|
-
# limitations under the License.
|
14
|
-
# ==============================================================================
|
15
|
-
|
16
|
-
from functools import wraps
|
17
|
-
from typing import Sequence, Tuple
|
18
|
-
|
19
|
-
from brainstate._state import StateTraceStack
|
20
|
-
from brainstate.typing import PyTree
|
21
|
-
from ._make_jaxpr import StatefulFunction
|
22
|
-
|
23
|
-
|
24
|
-
def write_back_state_values(
|
25
|
-
state_trace: StateTraceStack,
|
26
|
-
read_state_vals: Sequence[PyTree],
|
27
|
-
write_state_vals: Sequence[PyTree],
|
28
|
-
):
|
29
|
-
assert len(state_trace.states) == len(state_trace.been_writen) == len(read_state_vals) == len(write_state_vals)
|
30
|
-
for st, write, val_r, val_w in zip(state_trace.states, state_trace.been_writen, read_state_vals, write_state_vals):
|
31
|
-
if write:
|
32
|
-
st.value = val_w
|
33
|
-
else:
|
34
|
-
st.restore_value(val_r)
|
35
|
-
|
36
|
-
|
37
|
-
def wrap_single_fun_in_multi_branches(
|
38
|
-
stateful_fun: StatefulFunction,
|
39
|
-
merged_state_trace: StateTraceStack,
|
40
|
-
read_state_vals: Sequence[PyTree | None],
|
41
|
-
return_states: bool = True
|
42
|
-
):
|
43
|
-
state_ids_belong_to_this_fun = {id(st): st for st in stateful_fun.get_states()}
|
44
|
-
|
45
|
-
@wraps(stateful_fun.fun)
|
46
|
-
def wrapped_branch(write_state_vals, *operands):
|
47
|
-
# "write_state_vals" should have the same length as "merged_state_trace.states"
|
48
|
-
assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)
|
49
|
-
|
50
|
-
# get all state values needed for this function, which is a subset of "write_state_vals"
|
51
|
-
st_vals_for_this_fun = []
|
52
|
-
for write, st, val_w, val_r in zip(merged_state_trace.been_writen,
|
53
|
-
merged_state_trace.states,
|
54
|
-
write_state_vals,
|
55
|
-
read_state_vals):
|
56
|
-
if id(st) in state_ids_belong_to_this_fun:
|
57
|
-
st_vals_for_this_fun.append(val_w if write else val_r)
|
58
|
-
|
59
|
-
# call this function
|
60
|
-
new_state_vals, out = stateful_fun.jaxpr_call(st_vals_for_this_fun, *operands)
|
61
|
-
assert len(new_state_vals) == len(st_vals_for_this_fun)
|
62
|
-
|
63
|
-
if return_states:
|
64
|
-
# get all written state values
|
65
|
-
new_state_vals = {id(st): val for st, val in zip(stateful_fun.get_states(), new_state_vals)}
|
66
|
-
write_state_vals = tuple([
|
67
|
-
(new_state_vals[id(st)] if id(st) in state_ids_belong_to_this_fun else w_val)
|
68
|
-
if write else None
|
69
|
-
for write, st, w_val in zip(merged_state_trace.been_writen,
|
70
|
-
merged_state_trace.states,
|
71
|
-
write_state_vals)
|
72
|
-
])
|
73
|
-
return write_state_vals, out
|
74
|
-
return out
|
75
|
-
|
76
|
-
return wrapped_branch
|
77
|
-
|
78
|
-
|
79
|
-
def wrap_single_fun_in_multi_branches_while_loop(
|
80
|
-
stateful_fun: StatefulFunction,
|
81
|
-
merged_state_trace: StateTraceStack,
|
82
|
-
read_state_vals: Sequence[PyTree | None],
|
83
|
-
return_states: bool = True
|
84
|
-
):
|
85
|
-
state_ids_belong_to_this_fun = {id(st): st for st in stateful_fun.get_states()}
|
86
|
-
|
87
|
-
@wraps(stateful_fun.fun)
|
88
|
-
def wrapped_branch(init_val):
|
89
|
-
write_state_vals, init_val = init_val
|
90
|
-
# "write_state_vals" should have the same length as "merged_state_trace.states"
|
91
|
-
assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)
|
92
|
-
|
93
|
-
# get all state values needed for this function, which is a subset of "write_state_vals"
|
94
|
-
st_vals_for_this_fun = []
|
95
|
-
for write, st, val_w, val_r in zip(merged_state_trace.been_writen,
|
96
|
-
merged_state_trace.states,
|
97
|
-
write_state_vals,
|
98
|
-
read_state_vals):
|
99
|
-
if id(st) in state_ids_belong_to_this_fun:
|
100
|
-
st_vals_for_this_fun.append(val_w if write else val_r)
|
101
|
-
|
102
|
-
# call this function
|
103
|
-
new_state_vals, out = stateful_fun.jaxpr_call(st_vals_for_this_fun, init_val)
|
104
|
-
assert len(new_state_vals) == len(st_vals_for_this_fun)
|
105
|
-
|
106
|
-
if return_states:
|
107
|
-
# get all written state values
|
108
|
-
new_state_vals = {id(st): val for st, val in zip(stateful_fun.get_states(), new_state_vals)}
|
109
|
-
write_state_vals = tuple([
|
110
|
-
(new_state_vals[id(st)] if id(st) in state_ids_belong_to_this_fun else w_val)
|
111
|
-
if write else None
|
112
|
-
for write, st, w_val in zip(merged_state_trace.been_writen,
|
113
|
-
merged_state_trace.states,
|
114
|
-
write_state_vals)
|
115
|
-
])
|
116
|
-
return write_state_vals, out
|
117
|
-
return out
|
118
|
-
|
119
|
-
return wrapped_branch
|
120
|
-
|
121
|
-
|
122
|
-
def wrap_single_fun(
|
123
|
-
stateful_fun: StatefulFunction,
|
124
|
-
been_writen: Tuple[bool],
|
125
|
-
read_state_vals: Tuple[PyTree | None],
|
126
|
-
):
|
127
|
-
@wraps(stateful_fun.fun)
|
128
|
-
def wrapped_fun(new_carry, inputs):
|
129
|
-
writen_state_vals, carry = new_carry
|
130
|
-
assert len(been_writen) == len(writen_state_vals) == len(read_state_vals)
|
131
|
-
|
132
|
-
# collect all written and read states
|
133
|
-
state_vals = [
|
134
|
-
written_val if written else read_val
|
135
|
-
for written, written_val, read_val in zip(been_writen, writen_state_vals, read_state_vals)
|
136
|
-
]
|
137
|
-
|
138
|
-
# call the jaxpr
|
139
|
-
state_vals, (carry, out) = stateful_fun.jaxpr_call(state_vals, carry, inputs)
|
140
|
-
|
141
|
-
# only return the written states
|
142
|
-
writen_state_vals = tuple([val if written else None for written, val in zip(been_writen, state_vals)])
|
143
|
-
|
144
|
-
# return
|
145
|
-
return (writen_state_vals, carry), out
|
146
|
-
|
147
|
-
return wrapped_fun
|
1
|
+
# Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
|
16
|
+
from functools import wraps
|
17
|
+
from typing import Sequence, Tuple
|
18
|
+
|
19
|
+
from brainstate._state import StateTraceStack
|
20
|
+
from brainstate.typing import PyTree
|
21
|
+
from ._make_jaxpr import StatefulFunction
|
22
|
+
|
23
|
+
|
24
|
+
def write_back_state_values(
    state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree],
    write_state_vals: Sequence[PyTree],
):
    """Write traced values back into the states recorded by ``state_trace``.

    States flagged as written receive their new value from
    ``write_state_vals``; states that were only read are restored from
    ``read_state_vals`` via ``State.restore_value``.
    """
    # All four sequences are parallel over the traced states.
    assert len(state_trace.states) == len(state_trace.been_writen) == len(read_state_vals) == len(write_state_vals)
    records = zip(state_trace.states, state_trace.been_writen, read_state_vals, write_state_vals)
    for state, was_written, restored_val, new_val in records:
        if was_written:
            state.value = new_val
        else:
            state.restore_value(restored_val)
|
35
|
+
|
36
|
+
|
37
|
+
def wrap_single_fun_in_multi_branches(
    stateful_fun: StatefulFunction,
    merged_state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree | None],
    return_states: bool = True
):
    """Wrap ``stateful_fun`` as a branch callable over the merged state set.

    The wrapper receives the merged write-state values plus the branch
    operands, feeds ``stateful_fun`` only the values of the states it owns,
    and (when ``return_states`` is True) merges the updated values back into
    the full write-state tuple. Read-only slots are returned as ``None``.
    """
    # States owned by this particular branch function, keyed by identity.
    own_state_ids = {id(st): st for st in stateful_fun.get_states()}

    @wraps(stateful_fun.fun)
    def wrapped_branch(write_state_vals, *operands):
        # The merged sequences are parallel over all states of all branches.
        assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)

        # Pick out the values of the states this branch actually uses,
        # preferring the written value when the state was written.
        local_vals = [
            w_val if written else r_val
            for written, st, w_val, r_val in zip(merged_state_trace.been_writen,
                                                 merged_state_trace.states,
                                                 write_state_vals,
                                                 read_state_vals)
            if id(st) in own_state_ids
        ]

        # Run the traced function on its own state subset.
        new_vals, out = stateful_fun.jaxpr_call(local_vals, *operands)
        assert len(new_vals) == len(local_vals)

        if not return_states:
            return out

        # Merge the branch's updated values back into the full tuple:
        # unwritten slots become None, foreign written slots pass through.
        updated = {id(st): val for st, val in zip(stateful_fun.get_states(), new_vals)}
        merged = []
        for written, st, w_val in zip(merged_state_trace.been_writen,
                                      merged_state_trace.states,
                                      write_state_vals):
            if not written:
                merged.append(None)
            elif id(st) in own_state_ids:
                merged.append(updated[id(st)])
            else:
                merged.append(w_val)
        return tuple(merged), out

    return wrapped_branch
|
77
|
+
|
78
|
+
|
79
|
+
def wrap_single_fun_in_multi_branches_while_loop(
    stateful_fun: StatefulFunction,
    merged_state_trace: StateTraceStack,
    read_state_vals: Sequence[PyTree | None],
    return_states: bool = True
):
    """Wrap ``stateful_fun`` for use inside a while-loop body/cond.

    Identical in spirit to ``wrap_single_fun_in_multi_branches`` except the
    wrapper takes a single ``(write_state_vals, loop_val)`` carry, as the
    while-loop protocol requires.
    """
    # States owned by this particular function, keyed by identity.
    own_state_ids = {id(st): st for st in stateful_fun.get_states()}

    @wraps(stateful_fun.fun)
    def wrapped_branch(init_val):
        write_state_vals, loop_val = init_val
        # The merged sequences are parallel over all states of all branches.
        assert len(merged_state_trace.states) == len(write_state_vals) == len(read_state_vals)

        # Pick out the values of the states this function actually uses,
        # preferring the written value when the state was written.
        local_vals = [
            w_val if written else r_val
            for written, st, w_val, r_val in zip(merged_state_trace.been_writen,
                                                 merged_state_trace.states,
                                                 write_state_vals,
                                                 read_state_vals)
            if id(st) in own_state_ids
        ]

        # Run the traced function on its own state subset.
        new_vals, out = stateful_fun.jaxpr_call(local_vals, loop_val)
        assert len(new_vals) == len(local_vals)

        if not return_states:
            return out

        # Merge updated values back into the full tuple: unwritten slots
        # become None, foreign written slots pass through untouched.
        updated = {id(st): val for st, val in zip(stateful_fun.get_states(), new_vals)}
        merged = []
        for written, st, w_val in zip(merged_state_trace.been_writen,
                                      merged_state_trace.states,
                                      write_state_vals):
            if not written:
                merged.append(None)
            elif id(st) in own_state_ids:
                merged.append(updated[id(st)])
            else:
                merged.append(w_val)
        return tuple(merged), out

    return wrapped_branch
|
120
|
+
|
121
|
+
|
122
|
+
def wrap_single_fun(
    stateful_fun: StatefulFunction,
    been_writen: Tuple[bool],
    read_state_vals: Tuple[PyTree | None],
):
    """Wrap ``stateful_fun`` as a scan-style ``(carry, inputs)`` callable.

    The carry is ``(written_state_vals, user_carry)``. Read-only states keep
    their fixed ``read_state_vals`` entry; only written states are threaded
    through the carry (unwritten slots travel as ``None``).
    """

    @wraps(stateful_fun.fun)
    def wrapped_fun(new_carry, inputs):
        prev_write_vals, carry = new_carry
        # The three sequences are parallel over the traced states.
        assert len(been_writen) == len(prev_write_vals) == len(read_state_vals)

        # Merge written and read-only values into one flat value list.
        merged_vals = []
        for was_written, w_val, r_val in zip(been_writen, prev_write_vals, read_state_vals):
            merged_vals.append(w_val if was_written else r_val)

        # Evaluate the traced function.
        merged_vals, (carry, out) = stateful_fun.jaxpr_call(merged_vals, carry, inputs)

        # Carry only written slots forward; read-only slots become None.
        next_write_vals = tuple(
            val if was_written else None
            for was_written, val in zip(been_writen, merged_vals)
        )
        return (next_write_vals, carry), out

    return wrapped_fun
|