brainstate 0.1.8__py2.py3-none-any.whl → 0.1.10__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (133)
  1. brainstate/__init__.py +58 -51
  2. brainstate/_compatible_import.py +148 -148
  3. brainstate/_state.py +1605 -1663
  4. brainstate/_state_test.py +52 -52
  5. brainstate/_utils.py +47 -47
  6. brainstate/augment/__init__.py +30 -30
  7. brainstate/augment/_autograd.py +778 -778
  8. brainstate/augment/_autograd_test.py +1289 -1289
  9. brainstate/augment/_eval_shape.py +99 -99
  10. brainstate/augment/_eval_shape_test.py +38 -38
  11. brainstate/augment/_mapping.py +1060 -1060
  12. brainstate/augment/_mapping_test.py +597 -597
  13. brainstate/augment/_random.py +151 -151
  14. brainstate/compile/__init__.py +38 -38
  15. brainstate/compile/_ad_checkpoint.py +204 -204
  16. brainstate/compile/_ad_checkpoint_test.py +49 -49
  17. brainstate/compile/_conditions.py +256 -256
  18. brainstate/compile/_conditions_test.py +220 -220
  19. brainstate/compile/_error_if.py +92 -92
  20. brainstate/compile/_error_if_test.py +52 -52
  21. brainstate/compile/_jit.py +346 -346
  22. brainstate/compile/_jit_test.py +143 -143
  23. brainstate/compile/_loop_collect_return.py +536 -536
  24. brainstate/compile/_loop_collect_return_test.py +58 -58
  25. brainstate/compile/_loop_no_collection.py +184 -184
  26. brainstate/compile/_loop_no_collection_test.py +50 -50
  27. brainstate/compile/_make_jaxpr.py +888 -888
  28. brainstate/compile/_make_jaxpr_test.py +156 -156
  29. brainstate/compile/_progress_bar.py +202 -202
  30. brainstate/compile/_unvmap.py +159 -159
  31. brainstate/compile/_util.py +147 -147
  32. brainstate/environ.py +563 -563
  33. brainstate/environ_test.py +62 -62
  34. brainstate/functional/__init__.py +27 -26
  35. brainstate/graph/__init__.py +29 -29
  36. brainstate/graph/_graph_node.py +244 -244
  37. brainstate/graph/_graph_node_test.py +73 -73
  38. brainstate/graph/_graph_operation.py +1738 -1738
  39. brainstate/graph/_graph_operation_test.py +563 -563
  40. brainstate/init/__init__.py +26 -26
  41. brainstate/init/_base.py +52 -52
  42. brainstate/init/_generic.py +244 -244
  43. brainstate/init/_random_inits.py +553 -553
  44. brainstate/init/_random_inits_test.py +149 -149
  45. brainstate/init/_regular_inits.py +105 -105
  46. brainstate/init/_regular_inits_test.py +50 -50
  47. brainstate/mixin.py +365 -363
  48. brainstate/mixin_test.py +77 -73
  49. brainstate/nn/__init__.py +135 -131
  50. brainstate/{functional → nn}/_activations.py +808 -813
  51. brainstate/{functional → nn}/_activations_test.py +331 -331
  52. brainstate/nn/_collective_ops.py +514 -514
  53. brainstate/nn/_collective_ops_test.py +43 -43
  54. brainstate/nn/_common.py +178 -178
  55. brainstate/nn/_conv.py +501 -501
  56. brainstate/nn/_conv_test.py +238 -238
  57. brainstate/nn/_delay.py +588 -502
  58. brainstate/nn/_delay_test.py +238 -184
  59. brainstate/nn/_dropout.py +426 -426
  60. brainstate/nn/_dropout_test.py +100 -100
  61. brainstate/nn/_dynamics.py +1343 -1343
  62. brainstate/nn/_dynamics_test.py +78 -78
  63. brainstate/nn/_elementwise.py +1119 -1119
  64. brainstate/nn/_elementwise_test.py +169 -169
  65. brainstate/nn/_embedding.py +58 -58
  66. brainstate/nn/_exp_euler.py +92 -92
  67. brainstate/nn/_exp_euler_test.py +35 -35
  68. brainstate/nn/_fixedprob.py +239 -239
  69. brainstate/nn/_fixedprob_test.py +114 -114
  70. brainstate/nn/_inputs.py +608 -608
  71. brainstate/nn/_linear.py +424 -424
  72. brainstate/nn/_linear_mv.py +83 -83
  73. brainstate/nn/_linear_mv_test.py +120 -120
  74. brainstate/nn/_linear_test.py +107 -107
  75. brainstate/nn/_ltp.py +28 -28
  76. brainstate/nn/_module.py +377 -377
  77. brainstate/nn/_module_test.py +40 -40
  78. brainstate/nn/_neuron.py +705 -705
  79. brainstate/nn/_neuron_test.py +161 -161
  80. brainstate/nn/_normalizations.py +975 -918
  81. brainstate/nn/_normalizations_test.py +73 -73
  82. brainstate/{functional → nn}/_others.py +46 -46
  83. brainstate/nn/_poolings.py +1177 -1177
  84. brainstate/nn/_poolings_test.py +217 -217
  85. brainstate/nn/_projection.py +486 -486
  86. brainstate/nn/_rate_rnns.py +554 -554
  87. brainstate/nn/_rate_rnns_test.py +63 -63
  88. brainstate/nn/_readout.py +209 -209
  89. brainstate/nn/_readout_test.py +53 -53
  90. brainstate/nn/_stp.py +236 -236
  91. brainstate/nn/_synapse.py +505 -505
  92. brainstate/nn/_synapse_test.py +131 -131
  93. brainstate/nn/_synaptic_projection.py +423 -423
  94. brainstate/nn/_synouts.py +162 -162
  95. brainstate/nn/_synouts_test.py +57 -57
  96. brainstate/nn/_utils.py +89 -89
  97. brainstate/nn/metrics.py +388 -388
  98. brainstate/optim/__init__.py +38 -38
  99. brainstate/optim/_base.py +64 -64
  100. brainstate/optim/_lr_scheduler.py +448 -448
  101. brainstate/optim/_lr_scheduler_test.py +50 -50
  102. brainstate/optim/_optax_optimizer.py +152 -152
  103. brainstate/optim/_optax_optimizer_test.py +53 -53
  104. brainstate/optim/_sgd_optimizer.py +1104 -1104
  105. brainstate/random/__init__.py +24 -24
  106. brainstate/random/_rand_funs.py +3616 -3616
  107. brainstate/random/_rand_funs_test.py +567 -567
  108. brainstate/random/_rand_seed.py +210 -210
  109. brainstate/random/_rand_seed_test.py +48 -48
  110. brainstate/random/_rand_state.py +1409 -1409
  111. brainstate/random/_random_for_unit.py +52 -52
  112. brainstate/surrogate.py +1957 -1957
  113. brainstate/transform.py +23 -23
  114. brainstate/typing.py +304 -304
  115. brainstate/util/__init__.py +50 -50
  116. brainstate/util/caller.py +98 -98
  117. brainstate/util/error.py +55 -55
  118. brainstate/util/filter.py +469 -469
  119. brainstate/util/others.py +540 -540
  120. brainstate/util/pretty_pytree.py +945 -945
  121. brainstate/util/pretty_pytree_test.py +159 -159
  122. brainstate/util/pretty_repr.py +328 -328
  123. brainstate/util/pretty_table.py +2954 -2954
  124. brainstate/util/scaling.py +258 -258
  125. brainstate/util/struct.py +523 -523
  126. {brainstate-0.1.8.dist-info → brainstate-0.1.10.dist-info}/METADATA +91 -99
  127. brainstate-0.1.10.dist-info/RECORD +130 -0
  128. {brainstate-0.1.8.dist-info → brainstate-0.1.10.dist-info}/WHEEL +1 -1
  129. {brainstate-0.1.8.dist-info → brainstate-0.1.10.dist-info/licenses}/LICENSE +202 -202
  130. brainstate/functional/_normalization.py +0 -81
  131. brainstate/functional/_spikes.py +0 -204
  132. brainstate-0.1.8.dist-info/RECORD +0 -132
  133. {brainstate-0.1.8.dist-info → brainstate-0.1.10.dist-info}/top_level.txt +0 -0
@@ -1,553 +1,553 @@
(brainstate/init/_random_inits.py — every line is reported as removed and re-added, but the removed and added contents are identical; the unchanged file content appears once below.)
+ # Copyright 2024 BDP Ecosystem Limited. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ # -*- coding: utf-8 -*-
+
+ import math
+
+ import brainunit as u
+ import jax.numpy as jnp
+ import numpy as np
+
+ from brainstate import environ, random
+ from brainstate.typing import ArrayLike, SeedOrKey, DTypeLike
+ from ._base import Initializer, to_size
+
+ __all__ = [
+     'Normal',
+     'TruncatedNormal',
+     'Uniform',
+     'VarianceScaling',
+     'KaimingUniform',
+     'KaimingNormal',
+     'XavierUniform',
+     'XavierNormal',
+     'LecunUniform',
+     'LecunNormal',
+     'Orthogonal',
+     'DeltaOrthogonal',
+ ]
+
+
+ def calculate_gain(nonlinearity, param=None):
+     r"""Return the recommended gain value for the given nonlinearity function.
+     The values are as follows:
+
+     ================= ====================================================
+     nonlinearity      gain
+     ================= ====================================================
+     Linear / Identity :math:`1`
+     Conv{1,2,3}D      :math:`1`
+     Sigmoid           :math:`1`
+     Tanh              :math:`\frac{5}{3}`
+     ReLU              :math:`\sqrt{2}`
+     Leaky Relu        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
+     SELU              :math:`\frac{3}{4}`
+     ================= ====================================================
+
+     .. warning::
+         In order to implement `Self-Normalizing Neural Networks`_ ,
+         you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
+         This gives the initial weights a variance of ``1 / N``,
+         which is necessary to induce a stable fixed point in the forward pass.
+         In contrast, the default gain for ``SELU`` sacrifices the normalisation
+         effect for more stable gradient flow in rectangular layers.
+
+     Args:
+         nonlinearity: the non-linear function (`nn.functional` name)
+         param: optional parameter for the non-linear function
+
+     .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
+     """
+     linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
+     if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
+         return 1
+     elif nonlinearity == 'tanh':
+         return 5.0 / 3
+     elif nonlinearity == 'relu':
+         return math.sqrt(2.0)
+     elif nonlinearity == 'leaky_relu':
+         if param is None:
+             negative_slope = 0.01
+         elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
+             # True/False are instances of int, hence check above
+             negative_slope = param
+         else:
+             raise ValueError("negative_slope {} not a valid number".format(param))
+         return math.sqrt(2.0 / (1 + negative_slope ** 2))
+     elif nonlinearity == 'selu':
+         return 3.0 / 4
+     else:
+         raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
+
+
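As a quick sanity check of the gain table above (a sketch; `calculate_gain` is defined in this module but not exported through `__all__`, so the deep import path is an assumption):

    import math
    from brainstate.init._random_inits import calculate_gain  # path assumed; not in __all__

    assert calculate_gain('linear') == 1
    assert calculate_gain('tanh') == 5.0 / 3
    assert calculate_gain('relu') == math.sqrt(2.0)  # ~1.4142
    # leaky_relu defaults to negative_slope=0.01; a custom slope goes in `param`
    assert calculate_gain('leaky_relu', param=0.2) == math.sqrt(2.0 / (1 + 0.2 ** 2))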
+ def _format_shape(shape):
+     if isinstance(shape, int):
+         return (shape,)
+     if len(shape) == 0:
+         raise ValueError('Please provide shape.')
+     if len(shape) == 1:
+         if isinstance(shape[0], (tuple, list)):
+             return shape[0]
+         else:
+             return shape
+     else:
+         return shape
+
+
+ def _compute_fans(shape, in_axis=-2, out_axis=-1):
+     receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis]
+     fan_in = shape[in_axis] * receptive_field_size
+     fan_out = shape[out_axis] * receptive_field_size
+     return fan_in, fan_out
+
+
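To make the fan computation concrete, a worked example for a 3x3 convolution kernel with 16 input and 32 output channels (a spatial-dims-first kernel layout is assumed, matching the default in_axis=-2 / out_axis=-1):

    import numpy as np

    shape = (3, 3, 16, 32)                        # in_axis=-2 -> 16, out_axis=-1 -> 32
    rfs = np.prod(shape) / shape[-2] / shape[-1]  # 3 * 3 = 9, the receptive field size
    fan_in, fan_out = shape[-2] * rfs, shape[-1] * rfs
    print(fan_in, fan_out)                        # 144.0 288.0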
+ class Normal(Initializer):
+     """Initialize weights with normal distribution.
+
+     Parameters
+     ----------
+     scale : float
+         The standard deviation of the normal distribution.
+
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         mean: ArrayLike = 0.,
+         scale: ArrayLike = 1.,
+         unit: u.Unit = u.UNITLESS,
+         seed: SeedOrKey = None
+     ):
+         super().__init__()
+         self.scale = scale
+         self.mean = mean
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         weights = self.rng.normal(size=shape, loc=self.mean, scale=self.scale, dtype=dtype)
+         return u.maybe_decimal(u.Quantity(weights, unit=self.unit))
+
+
+ class TruncatedNormal(Initializer):
+     """Initialize weights with truncated normal distribution.
+
+     Parameters
+     ----------
+     loc : float, ndarray
+         Mean ("centre") of the distribution before truncating. Note that
+         the mean of the truncated distribution will not be exactly equal
+         to ``loc``.
+     scale : float
+         The standard deviation of the normal distribution before truncating.
+     lower : float, ndarray
+         A float or array of floats representing the lower bound for
+         truncation. Must be broadcast-compatible with ``upper``.
+     upper : float, ndarray
+         A float or array of floats representing the upper bound for
+         truncation. Must be broadcast-compatible with ``lower``.
+
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         loc: ArrayLike = 0.,
+         scale: ArrayLike = 1.,
+         unit: u.Unit = u.UNITLESS,
+         lower: ArrayLike = None,
+         upper: ArrayLike = None,
+         seed: SeedOrKey = None,
+     ):
+         super().__init__()
+         assert scale > 0, '`scale` must be positive.'
+         self.scale = scale
+         self.loc = loc
+         self.lower = lower
+         self.upper = upper
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         dtype = dtype or environ.dftype()
+         weights = self.rng.truncated_normal(
+             size=shape,
+             scale=self.scale,
+             lower=self.lower,
+             upper=self.upper,
+             loc=self.loc,
+             dtype=dtype
+         )
+         return u.maybe_decimal(u.Quantity(weights, unit=self.unit))
+
+
+ class Gamma(Initializer):
+     """Initialize weights with Gamma distribution.
+
+     Parameters
+     ----------
+     shape: float, Array
+         Shape parameter.
+     scale: float, Array
+         The scale parameter of the Gamma distribution.
+
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         shape: ArrayLike,
+         unit: u.Unit = u.UNITLESS,
+         scale: ArrayLike = None,
+         seed: SeedOrKey = None
+     ):
+         self.shape = shape
+         self.scale = scale
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         weights = self.rng.gamma(self.shape, scale=self.scale, size=shape, dtype=dtype)
+         return u.maybe_decimal(u.Quantity(weights, unit=self.unit))
+
+
+ class Exponential(Initializer):
+     """Initialize weights with exponential distribution.
+
+     Parameters
+     ----------
+     scale: float, Array
+         The scale parameter of the exponential distribution.
+
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: ArrayLike = None,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         self.scale = scale
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         weights = self.rng.exponential(scale=self.scale, size=shape, dtype=dtype)
+         return u.maybe_decimal(u.Quantity(weights, unit=self.unit))
+
+
+ class Uniform(Initializer):
+     """Initialize weights with uniform distribution.
+
+     Parameters
+     ----------
+     min_val : float
+         The lower limit of the uniform distribution.
+     max_val : float
+         The upper limit of the uniform distribution.
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         min_val: ArrayLike = 0.,
+         max_val: ArrayLike = 1.,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super(Uniform, self).__init__()
+         self.min_val = min_val
+         self.max_val = max_val
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         weights = self.rng.uniform(low=self.min_val, high=self.max_val, size=shape, dtype=dtype)
+         return u.maybe_decimal(u.Quantity(weights, unit=self.unit))
+
+
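A minimal usage sketch for the distribution-based initializers above: each instance is a callable that maps a shape to an array, or to a brainunit Quantity when a unit is given (the mV unit here is purely illustrative):

    import brainunit as u
    import brainstate

    init = brainstate.init.Normal(mean=0., scale=0.1, seed=42)
    w = init((128, 64))  # dtype defaults to environ.dftype()

    # With a physical unit the result carries that unit as a u.Quantity.
    v_init = brainstate.init.Uniform(min_val=-70., max_val=-60., unit=u.mV)
    v0 = v_init((128,))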
+ class VarianceScaling(Initializer):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: ArrayLike,
+         mode: str,
+         distribution: str,
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         assert mode in ['fan_in', 'fan_out', 'fan_avg']
+         assert distribution in ['truncated_normal', 'normal', 'uniform']
+         self.scale = scale
+         self.mode = mode
+         self.in_axis = in_axis
+         self.out_axis = out_axis
+         self.distribution = distribution
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         fan_in, fan_out = _compute_fans(shape, in_axis=self.in_axis, out_axis=self.out_axis)
+         if self.mode == "fan_in":
+             denominator = fan_in
+         elif self.mode == "fan_out":
+             denominator = fan_out
+         elif self.mode == "fan_avg":
+             denominator = (fan_in + fan_out) / 2
+         else:
+             raise ValueError("invalid mode for variance scaling initializer: {}".format(self.mode))
+         variance = (self.scale / denominator).astype(dtype)
+         if self.distribution == "truncated_normal":
+             stddev = (jnp.sqrt(variance) / .87962566103423978).astype(dtype)
+             res = self.rng.truncated_normal(-2, 2, shape, dtype=dtype) * stddev
+         elif self.distribution == "normal":
+             res = self.rng.randn(*shape, dtype=dtype) * jnp.sqrt(variance).astype(dtype)
+         elif self.distribution == "uniform":
+             res = (self.rng.uniform(low=-1, high=1, size=shape, dtype=dtype) *
+                    jnp.sqrt(3 * variance).astype(dtype))
+         else:
+             raise ValueError("invalid distribution for variance scaling initializer")
+         return u.maybe_decimal(u.Quantity(res, unit=self.unit))
+
+
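For a dense weight of shape (100, 200), fan_in is 100 and fan_out is 200, so the three modes yield the variances sketched below:

    import brainstate

    vs = brainstate.init.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
    w = vs((100, 200))
    # mode='fan_in'  -> variance = 2.0 / 100
    # mode='fan_out' -> variance = 2.0 / 200
    # mode='fan_avg' -> variance = 2.0 / 150
    # The 0.87962566... divisor undoes the variance lost by truncating the normal at +/-2 stddev.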
+ class KaimingUniform(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 2.0,
+         mode: str = "fan_in",
+         distribution: str = "uniform",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
+ class KaimingNormal(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 2.0,
+         mode: str = "fan_in",
+         distribution: str = "truncated_normal",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
+ class XavierUniform(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 1.0,
+         mode: str = "fan_avg",
+         distribution: str = "uniform",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
+ class XavierNormal(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 1.0,
+         mode: str = "fan_avg",
+         distribution: str = "truncated_normal",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
+ class LecunUniform(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 1.0,
+         mode: str = "fan_in",
+         distribution: str = "uniform",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
+ class LecunNormal(VarianceScaling):
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: float = 1.0,
+         mode: str = "fan_in",
+         distribution: str = "truncated_normal",
+         in_axis: int = -2,
+         out_axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__(scale,
+                          mode,
+                          distribution,
+                          in_axis=in_axis,
+                          out_axis=out_axis,
+                          seed=seed,
+                          unit=unit)
+
+
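Each preset above is just VarianceScaling with fixed defaults, so, for example, these two initializers are equivalent:

    import brainstate

    k1 = brainstate.init.KaimingUniform()
    k2 = brainstate.init.VarianceScaling(2.0, 'fan_in', 'uniform')
    # Likewise: Xavier* uses scale=1.0 with mode='fan_avg'; Lecun* uses scale=1.0 with mode='fan_in'.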
+ class Orthogonal(Initializer):
+     """
+     Construct an initializer for uniformly distributed orthogonal matrices.
+
+     If the shape is not square, the matrix will have orthonormal rows or columns
+     depending on which side is smaller.
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: ArrayLike = 1.,
+         axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__()
+         self.scale = scale
+         self.axis = axis
+         self.rng = random.default_rng(seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         dtype = dtype or environ.dftype()
+         shape = to_size(shape)
+         n_rows = shape[self.axis]
+         n_cols = np.prod(shape) // n_rows
+         matrix_shape = (n_rows, n_cols) if n_rows > n_cols else (n_cols, n_rows)
+         norm_dst = self.rng.normal(size=matrix_shape, dtype=dtype)
+
+         q_mat, r_mat = jnp.linalg.qr(norm_dst)
+         # Enforce Q is uniformly distributed
+         q_mat *= jnp.sign(jnp.diag(r_mat))
+         if n_rows < n_cols:
+             q_mat = q_mat.T
+         q_mat = jnp.reshape(q_mat, (n_rows,) + tuple(np.delete(shape, self.axis)))
+         q_mat = jnp.moveaxis(q_mat, 0, self.axis)
+         r = jnp.asarray(self.scale, dtype=dtype) * q_mat
+         return u.maybe_decimal(u.Quantity(r, unit=self.unit))
+
+
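A sketch verifying the orthogonality property for a square matrix (the float32 tolerance is an assumption):

    import jax.numpy as jnp
    import brainstate

    w = brainstate.init.Orthogonal(seed=0)((64, 64))
    # With scale=1 the rows/columns are orthonormal, so W @ W.T ~ I.
    assert jnp.allclose(w @ w.T, jnp.eye(64), atol=1e-4)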
+ class DeltaOrthogonal(Initializer):
+     """
+     Construct an initializer for delta orthogonal kernels; see arXiv:1806.05393.
+
+     The shape must be 3D, 4D or 5D.
+     """
+     __module__ = 'brainstate.init'
+
+     def __init__(
+         self,
+         scale: ArrayLike = 1.0,
+         axis: int = -1,
+         seed: SeedOrKey = None,
+         unit: u.Unit = u.UNITLESS,
+     ):
+         super().__init__()
+         self.scale = scale
+         self.axis = axis
+         self.orthogonal = Orthogonal(scale=scale, axis=axis, seed=seed)
+         self.unit = unit
+
+     def __call__(self, shape, dtype: DTypeLike = None):
+         shape = to_size(shape)
+         dtype = dtype or environ.dftype()
+         if len(shape) not in [3, 4, 5]:
+             raise ValueError("Delta orthogonal initializer requires a 3D, 4D or 5D shape.")
+         if shape[-1] < shape[-2]:
+             raise ValueError("`fan_in` must be less than or equal to `fan_out`.")
+         ortho_matrix = u.Quantity(self.orthogonal(shape[-2:]))
+         W = u.Quantity(u.math.zeros(shape, dtype=dtype), unit=u.get_unit(ortho_matrix))
+         if len(shape) == 3:
+             k = shape[0]
+             W = W.at[(k - 1) // 2].set(ortho_matrix)
+         elif len(shape) == 4:
+             k1, k2 = shape[:2]
+             W = W.at[(k1 - 1) // 2, (k2 - 1) // 2].set(ortho_matrix)
+         else:
+             k1, k2, k3 = shape[:3]
+             W = W.at[(k1 - 1) // 2, (k2 - 1) // 2, (k3 - 1) // 2].set(ortho_matrix)
+         return u.maybe_decimal(u.Quantity(W.mantissa, unit=self.unit))
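And a closing sketch for DeltaOrthogonal: for a 4D kernel only the centre spatial tap carries the orthogonal block, which is what makes the convolution act as an orthogonal map at initialization:

    import brainstate

    delta = brainstate.init.DeltaOrthogonal(seed=0)
    w = delta((3, 3, 16, 32))  # requires fan_in (16) <= fan_out (32)
    # w[1, 1] holds the 16x32 orthogonal block; every other spatial tap is zero.
    assert (w[0, 0] == 0).all()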