sinabs 3.0.4.dev25__py3-none-any.whl → 3.1.1.dev1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (48)
  1. sinabs/activation/reset_mechanism.py +3 -3
  2. sinabs/activation/surrogate_gradient_fn.py +4 -4
  3. sinabs/backend/dynapcnn/__init__.py +5 -4
  4. sinabs/backend/dynapcnn/chip_factory.py +33 -61
  5. sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
  6. sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
  7. sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
  8. sinabs/backend/dynapcnn/config_builder.py +39 -59
  9. sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
  10. sinabs/backend/dynapcnn/discretize.py +91 -155
  11. sinabs/backend/dynapcnn/dvs_layer.py +59 -101
  12. sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
  13. sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
  14. sinabs/backend/dynapcnn/dynapcnn_network.py +602 -325
  15. sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
  16. sinabs/backend/dynapcnn/exceptions.py +122 -3
  17. sinabs/backend/dynapcnn/io.py +51 -91
  18. sinabs/backend/dynapcnn/mapping.py +111 -75
  19. sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
  20. sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
  21. sinabs/backend/dynapcnn/utils.py +214 -459
  22. sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
  23. sinabs/conversion.py +2 -2
  24. sinabs/from_torch.py +23 -1
  25. sinabs/hooks.py +38 -41
  26. sinabs/layers/alif.py +16 -16
  27. sinabs/layers/crop2d.py +2 -2
  28. sinabs/layers/exp_leak.py +1 -1
  29. sinabs/layers/iaf.py +11 -11
  30. sinabs/layers/lif.py +9 -9
  31. sinabs/layers/neuromorphic_relu.py +9 -8
  32. sinabs/layers/pool2d.py +5 -5
  33. sinabs/layers/quantize.py +1 -1
  34. sinabs/layers/stateful_layer.py +10 -7
  35. sinabs/layers/to_spike.py +9 -9
  36. sinabs/network.py +14 -12
  37. sinabs/synopcounter.py +10 -7
  38. sinabs/utils.py +155 -7
  39. sinabs/validate_memory_speck.py +0 -5
  40. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dev1.dist-info}/METADATA +2 -1
  41. sinabs-3.1.1.dev1.dist-info/RECORD +65 -0
  42. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dev1.dist-info}/licenses/AUTHORS +1 -0
  43. sinabs-3.1.1.dev1.dist-info/pbr.json +1 -0
  44. sinabs-3.0.4.dev25.dist-info/RECORD +0 -59
  45. sinabs-3.0.4.dev25.dist-info/pbr.json +0 -1
  46. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dev1.dist-info}/WHEEL +0 -0
  47. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dev1.dist-info}/licenses/LICENSE +0 -0
  48. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dev1.dist-info}/top_level.txt +0 -0
sinabs/backend/dynapcnn/config_builder.py
@@ -1,9 +1,9 @@
 from abc import ABC, abstractmethod
-from typing import List
+from typing import Dict, List
 
 import samna
 
-from .dvs_layer import DVSLayer
+from .dynapcnn_layer import DynapcnnLayer
 from .mapping import LayerConstraints, get_valid_mapping
 
 
@@ -13,34 +13,35 @@ class ConfigBuilder(ABC):
     def get_samna_module(self):
         """Get the samna parent module that hosts all the appropriate sub-modules and classes.
 
-        Returns
-        -------
-        samna module
+        Returns:
+            samna module
         """
 
     @classmethod
     @abstractmethod
     def get_default_config(cls):
         """
-        Returns
-        -------
-        Returns the default configuration for the device type
+        Returns:
+            Default configuration for the device type
         """
 
     @classmethod
     @abstractmethod
-    def build_config(cls, model: "DynapcnnNetwork", chip_layers: List[int]):
+    def build_config(
+        cls,
+        layers: Dict[int, DynapcnnLayer],
+        layer2core_map: Dict[int, int],
+        destination_map: Dict[int, List[int]],
+    ):
         """Build the configuration given a model.
 
-        Parameters
-        ----------
-        model:
-            The target model
-        chip_layers:
-            Chip layers where the given model layers are to be mapped.
+        Args:
+            layers (Dict): Keys are layer indices, values are DynapcnnLayer instances.
+            layer2core_map (Dict): Keys are layer indices, values are corresponding
+                cores on hardware. Needed to map the destinations.
+            destination_map (Dict): Indices of destination layers for `layer`.
 
-        Returns
-        -------
+        Returns:
             Samna Configuration object
         """
 
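Note: `build_config` changes shape here. Instead of a whole `DynapcnnNetwork` plus a flat `chip_layers` list, it now takes three dicts keyed by layer index. A minimal sketch of the new call, assuming a concrete builder subclass named `DynapcnnConfigBuilder` and pre-built `DynapcnnLayer` instances (these names are illustrative, not taken from this diff):

```python
# Illustrative only: DynapcnnConfigBuilder, dcnn_layer_0 and dcnn_layer_1 are
# assumed names standing in for a concrete ConfigBuilder subclass and
# already-instantiated DynapcnnLayer objects.
layers = {0: dcnn_layer_0, 1: dcnn_layer_1}  # layer index -> DynapcnnLayer
layer2core_map = {0: 0, 1: 1}                # layer index -> on-chip core ID
destination_map = {0: [1], 1: []}            # layer index -> downstream layer indices

config = DynapcnnConfigBuilder.build_config(
    layers=layers,
    layer2core_map=layer2core_map,
    destination_map=destination_map,
)
```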
@@ -49,8 +50,7 @@ class ConfigBuilder(ABC):
     def get_constraints(cls) -> List[LayerConstraints]:
         """Returns the layer constraints of a the given device.
 
-        Returns
-        -------
+        Returns:
             List[LayerConstraints]
         """
@@ -60,43 +60,27 @@ class ConfigBuilder(ABC):
         """Enable the monitor for a given set of layers in the config object."""
 
     @classmethod
-    def get_valid_mapping(cls, model: "DynapcnnNetwork") -> List[int]:
-        """Find a valid set of layers for a given model.
-
-        Parameters
-        ----------
-        model (DynapcnnNetwork):
-            A model
-
-        Returns
-        -------
-        List of core indices corresponding to each layer of the model:
-            The index of the core on chip to which the i-th layer in the
-            model is mapped is the value of the i-th entry in the list.
+    def map_layers_to_cores(cls, layers: Dict[int, DynapcnnLayer]) -> Dict[int, int]:
+        """Find a mapping from DynapcnnLayers onto on-chip cores
+
+        Args:
+            layers: Dict with layer indices as keys and DynapcnnLayer instances as values.
+
+        Returns:
+            Dict mapping layer indices (keys) to assigned core IDs (values).
         """
-        mapping = get_valid_mapping(model, cls.get_constraints())
-        # turn the mapping into a dict
-        mapping = {m[0]: m[1] for m in mapping}
-        # Check if there is a dvs layer in the model
-        num_dynapcnn_cores = len(model.sequence)
-        if isinstance(model.sequence[0], DVSLayer):
-            num_dynapcnn_cores -= 1
-        # apply the mapping
-        chip_layers_ordering = [mapping[i] for i in range(num_dynapcnn_cores)]
-        return chip_layers_ordering
+
+        return get_valid_mapping(layers, cls.get_constraints())
 
     @classmethod
     def validate_configuration(cls, config) -> bool:
         """Check if a given configuration is valid.
 
-        Parameters
-        ----------
-        config:
-            Configuration object
+        Args:
+            config: Configuration object.
 
-        Returns
-        -------
-        True if the configuration is valid, else false
+        Returns:
+            True if the configuration is valid, else false
         """
         is_valid, message = cls.get_samna_module().validate_configuration(config)
         if not is_valid:
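Note: the removed `get_valid_mapping` returned a positional list (entry i = core for layer i) and special-cased a leading `DVSLayer`; its replacement `map_layers_to_cores` returns the assignment as a dict directly. A hedged usage sketch, reusing the assumed names from the previous example:

```python
# Illustrative only: builder subclass and `layers` dict as assumed above.
layer2core_map = DynapcnnConfigBuilder.map_layers_to_cores(layers)
# e.g. {0: 0, 1: 2}: layer 1 placed on core 2 to satisfy the chip constraints
for layer_idx, core_id in layer2core_map.items():
    print(f"DynapcnnLayer {layer_idx} -> core {core_id}")
```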
@@ -127,22 +111,18 @@ class ConfigBuilder(ABC):
     def reset_states(cls, config, randomize=False):
         """Randomize or reset the neuron states.
 
-        Parameters
-        ----------
-        randomize (bool):
-            If true, the states will be set to random initial values. Else they will be set to zero
+        Args:
+            randomize (bool): If true, the states will be set to random initial values.
+                Else they will be set to zero
         """
 
     @classmethod
     def set_all_v_mem_to_zeros(cls, samna_device, layer_id: int) -> None:
         """Reset all memory states to zeros.
 
-        Parameters
-        ----------
-        samna_device:
-            samna device object to erase vmem memory.
-        layer_id:
-            layer index
+        Args:
+            samna_device: samna device object to erase vmem memory.
+            layer_id: layer index
         """
         mod = cls.get_samna_module()
         layer_constraint: LayerConstraints = cls.get_constraints()[layer_id]
sinabs/backend/dynapcnn/connectivity_specs.py
@@ -0,0 +1,48 @@
+"""
+functionality : list device-independent supported connections between layers on chip
+"""
+
+from typing import Union
+
+import torch.nn as nn
+
+import sinabs.layers as sl
+
+from .dvs_layer import DVSLayer
+
+Pooling = (sl.SumPool2d, nn.AvgPool2d)
+Weight = (nn.Conv2d, nn.Linear)
+Neuron = (sl.IAFSqueeze,)
+DVS = (DVSLayer,)
+SupportedNodeTypes = (*Pooling, *Weight, *Neuron, *DVS)
+
+VALID_SINABS_EDGE_TYPES_ABSTRACT = {
+    # convoluion is always followed by a neuron layer.
+    (Weight, Neuron): "weight-neuron",
+    # Neuron layer can be followed by pooling
+    (Neuron, Pooling): "neuron-pooling",
+    # Pooling can be followed by another pooling (will be consolidated)
+    (Pooling, Pooling): "pooling-pooling",
+    # Neuron layer can be followed by weight layer of next core
+    (Neuron, Weight): "neuron-weight",
+    # Pooling can be followed by weight layer of next core
+    (Pooling, Weight): "pooling-weight",
+    # Dvs can be followed by weight layer of next core
+    (DVS, Weight): "dvs-weight",
+    # Dvs can be followed by pooling layer
+    (DVS, Pooling): "dvs-pooling",
+}
+
+# Unpack dict
+VALID_SINABS_EDGE_TYPES = {
+    (source_type, target_type): name
+    for types, name in VALID_SINABS_EDGE_TYPES_ABSTRACT.items()
+    for source_type in types[0]
+    for target_type in types[1]
+}
+
+# Only `Merge` layers are allowed to join multiple inputs
+LAYER_TYPES_WITH_MULTIPLE_INPUTS = (sl.Merge,)
+
+# Neuron and pooling layers can have their output sent to multiple cores
+LAYER_TYPES_WITH_MULTIPLE_OUTPUTS = (*Neuron, *Pooling, *DVS)
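Note: the comprehension in this new module expands each abstract edge (a pair of type tuples) into one dict entry per concrete (source class, target class) pair, so edge classification can use exact layer types as keys. Under the definitions above, for instance:

```python
import torch.nn as nn
import sinabs.layers as sl
from sinabs.backend.dynapcnn.connectivity_specs import VALID_SINABS_EDGE_TYPES

# (Weight, Neuron) -> "weight-neuron" expands to every concrete pairing:
assert VALID_SINABS_EDGE_TYPES[(nn.Conv2d, sl.IAFSqueeze)] == "weight-neuron"
assert VALID_SINABS_EDGE_TYPES[(nn.Linear, sl.IAFSqueeze)] == "weight-neuron"
# (Neuron, Pooling) covers both pooling flavours:
assert VALID_SINABS_EDGE_TYPES[(sl.IAFSqueeze, sl.SumPool2d)] == "neuron-pooling"
assert VALID_SINABS_EDGE_TYPES[(sl.IAFSqueeze, nn.AvgPool2d)] == "neuron-pooling"
```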
sinabs/backend/dynapcnn/discretize.py
@@ -19,21 +19,14 @@ def discretize_conv_spike(
     This function takes a 2D convolutional and a spiking layer and returns a
     copy of each, with discretized weights, bias and threshold.
 
-    Parameters
-    ----------
-    conv_lyr: nn.Conv2d
-        Convolutional layer
-    spike_lyr: sl.IAF
-        Spiking layer
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
-        Discretized copy of convolutional layer
-    sl.IAF
-        Discretized copy of spiking layer
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer.
+        spike_lyr (sl.IAF): Spiking layer.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
+        Tuple containing a discretized copy of convolutional layer and
+        a discretized copy of spiking layer.
     """
     conv_lyr_copy = deepcopy(conv_lyr)
     spike_lyr_copy = deepcopy(spike_lyr)
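Note: as the docstring and the `deepcopy` calls show, this variant discretizes copies and leaves the inputs untouched. A hedged usage sketch (not verified against the package; `min_v_mem` is set explicitly on the assumption that the routine needs a finite lower threshold):

```python
import torch.nn as nn
import sinabs.layers as sl
from sinabs.backend.dynapcnn.discretize import discretize_conv_spike

conv = nn.Conv2d(1, 8, kernel_size=3, bias=False)
spk = sl.IAF(min_v_mem=-1.0)  # spiking layer; its thresholds get discretized too

# Returns discretized copies; `conv` and `spk` themselves stay untouched.
conv_q, spk_q = discretize_conv_spike(conv, spk, to_int=True)
```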
@@ -48,21 +41,14 @@ def discretize_conv_spike_(
     This function takes a 2D convolutional and a spiking layer and discretizes
     weights, bias and threshold in-place.
 
-    Parameters
-    ----------
-    conv_lyr: nn.Conv2d
-        Convolutional layer
-    spike_lyr: sl.IAF
-        Spiking layer
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
-        Discretized convolutional layer
-    sl.IAF
-        Discretized spiking layer
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer.
+        spike_lyr (sl.IAF): Spiking layer.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
+        A tuple containing a discretized convolutional layer and a
+        discretized spiking layer.
     """
 
     return _discretize_conv_spk_(conv_lyr, spike_lyr, to_int=to_int)
@@ -74,28 +60,20 @@ def discretize_conv(
     spk_thr_low: float,
     spk_state: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> nn.Conv2d:
     """Discretize convolutional layer.
 
     This function takes a 2D convolutional layer and parameters of a subsequent
     spiking layer to return a discretized copy of the convolutional layer.
 
-    Parameters
-    ----------
-    layer: nn.Conv2d
-        Convolutional layer
-    spk_thr: float
-        Upper threshold of subsequent spiking layer
-    spk_thr_low: float
-        Lower threshold of subsequent spiking layer
-    spk_state: torch.Tensor or None
-        State of spiking layer.
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
+    Args:
+        layer (nn.Conv2d): Convolutional layer.
+        spk_thr (float): Upper threshold of subsequent spiking layer.
+        spk_thr_low (float): Lower threshold of subsequent spiking layer.
+        spk_state (torch.Tensor): State of spiking layer. Defaults to None.
+        to_int (bool): Use integer types for discretized parameter. Defaults to True.
+
+    Returns:
         Discretized copy of convolutional layer
     """
     lyr_copy = deepcopy(layer)
@@ -115,28 +93,20 @@ def discretize_conv_(
     spk_thr_low: float,
     spk_state: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> nn.Conv2d:
     """Discretize convolutional layer, in-place.
 
     This function discretizes a 2D convolutional layer in-place, based on
     parameters of a subsequent spiking layer.
 
-    Parameters
-    ----------
-    layer: nn.Conv2d
-        Convolutional layer
-    spk_thr: float
-        Upper threshold of subsequent spiking layer
-    spk_thr_low: float
-        Lower threshold of subsequent spiking layer
-    spk_state: torch.Tensor or None
-        State of spiking layer.
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
+    Args:
+        layer (nn.Conv2d): Convolutional layer.
+        spk_thr (float): Upper threshold of subsequent spiking layer.
+        spk_thr_low (float): Lower threshold of subsequent spiking layer.
+        spk_state (torch.Tensor): State of spiking layer. Defaults to None.
+        to_int (bool): Use integer types for discretized parameter. Defaults to True.
+
+    Returns:
        Discretized convolutional layer
    """
    layer_discr, __ = _discretize_conv_spk_(
@@ -154,26 +124,20 @@ def discretize_spk(
     conv_weight: torch.Tensor,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> sl.IAF:
     """Discretize spiking layer.
 
     This function takes a spiking layer and parameters of a preceding
     convolutional layer to return a discretized copy of the spiking layer.
 
-    Parameters
-    ----------
-    layer: sl.IAF
-        Spiking layer
-    conv_weight: torch.Tensor
-        Weight tensor of preceding convolutional layer
-    conv_bias: torch.Tensor or None
-        Bias of preceding convolutional layer
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    sl.IAF
+    Args:
+        layer (sl.IAF): Spiking layer.
+        conv_weight (torch.Tensor): Weight tensor of preceding convolutional layer.
+        conv_bias (torch.Tensor): Bias of preceding convolutional layer.
+            Optional argument, defaults to None.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
         Discretized copy of spiking layer
     """
     lyr_copy = deepcopy(layer)
@@ -188,26 +152,20 @@ def discretize_spk_(
     conv_weight: torch.Tensor,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> sl.IAF:
     """Discretize spiking layer in-place.
 
     This function discretizes a spiking layer in-place, based on parameters of a
     preceding convolutional layer.
 
-    Parameters
-    ----------
-    layer: sl.IAF
-        Spiking layer
-    conv_weight: torch.Tensor
-        Weight tensor of preceding convolutional layer
-    conv_bias: torch.Tensor or None
-        Bias of preceding convolutional layer
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    sl.IAF
+    Args:
+        layer (sl.IAF): Spiking layer.
+        conv_weight (torch.Tensor): Weight tensor of preceding convolutional layer.
+        conv_bias (torch.Tensor): Bias of preceding convolutional layer.
+            Optional argument, defaults to None.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
        Discretized spiking
    """
    __, layer_discr = _discretize_conv_spk_(
@@ -225,7 +183,7 @@ def _discretize_conv_spk_(
     conv_weight: Optional[torch.Tensor] = None,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> Tuple[nn.Conv2d, sl.IAF]:
     """Discretize convolutional and spiking layer.
 
     Determine and apply a suitable scaling factor for weight and bias of
@@ -234,34 +192,27 @@ def _discretize_conv_spk_(
     providing layers, respective parameters can be provided directly. If a layer
     is not provided, `None` will be returned instead of its discrete version.
 
-    Parameters
-    ----------
-    conv_lyr: nn.Conv2d or None
-        Convolutional layer
-    spike_lyr: sl.IAF or None
-        Spiking layer
-    spk_thr: float or None
-        Upper threshold of spiking layer. Has to be provided if `spike_lyr` is `None`.
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer. Optional argument, defaults to None.
+        spike_lyr (sl.IAF): Spiking layer. Optional argument, defaults to None.
+        spk_thr (float): Upper threshold of spiking layer. Optional argument, defaults
+            to None. Has to be provided if `spike_lyr` is `None`.
             Is ignored otherwise.
-    spk_thr_low: float or None
-        Lower threshold of spiking layer. Has to be provided if `spike_lyr` is `None`.
+        spk_thr_low (float): Lower threshold of spiking layer. Optional argument,
+            defaults to None. Has to be provided if `spike_lyr` is `None`.
            Is ignored otherwise.
-    spk_state: torch.Tensor or None
-        State of spiking layer. Ignored if `spike_lyr` is not `None`.
-    conv_weight: torch.Tensor or None
-        Weight of convolutional layer. Has to be provided if `conv_lyr` is `None`.
+        spk_state (torch.Tensor): State of spiking layer. Optional argument
+            defaults to None. Ignored if `spike_lyr` is not `None`.
+        conv_weight (torch.Tensor): Weight of convolutional layer. Optional argument,
+            defaults to None. Has to be provided if `conv_lyr` is `None`.
            Is ignored otherwise.
-    conv_bias: torch.Tensor or None
-        Bias of convolutional layer. Ignored if `conv_lyr` is not `None`.
-    to_int: bool
-        Use integer types for discretized parameters.
-
-    Returns
-    -------
-    nn.Conv2d or None
-        Discretized convolutional layer if `conv_lyr` is not `None`, else `None`
-    sl.IAF or None
-        Discretized spiking layer if `spk_lyr` is not `None`, else `None`
+        conv_bias (torch.Tensor): Bias of convolutional layer. Optional argument,
+            defaults to None. Ignored if `conv_lyr` is not `None`.
+        to_int (bool): Use integer types for discretized parameters.
+
+    Returns:
+        Discretized convolutional layer if `conv_lyr` is not `None`, else `None`.
+        and discretized spiking layer if `spk_lyr` is not `None`, else `None`
     """
 
     if conv_lyr is None:
@@ -360,17 +311,12 @@ def determine_discretization_scale(obj: torch.Tensor, bit_precision: int) -> float:
     Determine how much the values of a torch tensor can be scaled in order to fit
     the given precision
 
-    Parameters
-    ----------
-    obj: torch.Tensor
-        Tensor that is to be scaled
-    bit_precision: int
-        The precision in bits
-
-    Returns
-    -------
-    float
-        The scaling factor
+    Args:
+        obj (torch.Tensor): Tensor that is to be scaled.
+        bit_precision (int): The precision in bits.
+
+    Returns:
+        The scaling factor
     """
 
     # Discrete range
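Note: the docstring rewrite does not change the contract: given a tensor and a bit precision, return the largest factor that maps the tensor's values onto the representable integer range. The actual computation is not part of this diff; the following is a hypothetical re-derivation, assuming a symmetric signed range:

```python
import torch

def scale_for_precision(obj: torch.Tensor, bit_precision: int) -> float:
    """Hypothetical re-derivation of the scaling logic, not the package's code."""
    # With n bits, a symmetric signed range holds integers in
    # [-(2**(n-1)), 2**(n-1) - 1].
    min_val_disc = -(2 ** (bit_precision - 1))
    max_val_disc = 2 ** (bit_precision - 1) - 1
    # Candidate factors mapping each tensor extreme onto the range boundary;
    # extremes of the "wrong" sign impose no constraint.
    candidates = []
    if obj.max().item() > 0:
        candidates.append(max_val_disc / obj.max().item())
    if obj.min().item() < 0:
        candidates.append(min_val_disc / obj.min().item())
    return min(candidates) if candidates else 1.0

# E.g. 8 bits give the range [-128, 127]; a tensor spanning [-0.5, 1.0]
# can be scaled by at most min(127 / 1.0, -128 / -0.5) = 127.0.
```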
@@ -397,19 +343,14 @@ def discretize_tensor(
 ) -> torch.Tensor:
     """Scale a torch.Tensor and cast it to discrete integer values.
 
-    Parameters
-    ----------
-    obj: torch.Tensor
-        Tensor that is to be discretized
-    scaling: float
-        Scaling factor to be applied before discretization
-    to_int: bool
-        If False, round the values, but don't cast to Int. (Default True).
-
-    Returns
-    -------
-    torch.Tensor
-        Scaled and discretized copy of `obj`.
+    Args:
+        obj (torch.Tensor): Tensor that is to be discretized.
+        scaling (float): Scaling factor to be applied before discretization.
+        to_int (bool): If False, round the values, but don't cast to Int.
+            Defaults to True.
+
+    Returns:
+        Scaled and discretized copy of `obj`.
     """
 
     # Scale the values
@@ -427,17 +368,12 @@
 def discretize_scalar(obj: float, scaling: float) -> int:
     """Scale a float and cast it to discrete integer values.
 
-    Parameters
-    ----------
-    obj: float
-        Value that is to be discretized
-    scaling: float
-        Scaling factor to be applied before discretization
-
-    Returns
-    -------
-    int
-        Scaled and discretized copy of `obj`.
+    Args:
+        obj (float): Value that is to be discretized.
+        scaling (float): Scaling factor to be applied before discretization.
+
+    Returns:
+        Scaled and discretized copy of `obj`.
     """
 
     # Scale the values
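Note: `discretize_tensor` and `discretize_scalar` share the same scale-then-round recipe. Only their docstrings appear in this diff, so the following is a sketch of the documented semantics rather than the implementation:

```python
import torch

scaling = 100.0
t = torch.tensor([0.734, -0.256])
# discretize_tensor: scale, then round; to_int=False keeps the float dtype.
print(torch.round(t * scaling))     # tensor([ 73., -26.])
# discretize_scalar: same idea for a single value, always returning an int.
print(int(round(0.734 * scaling)))  # 73
```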