sinabs 3.0.4.dev2__py3-none-any.whl → 3.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. sinabs/activation/reset_mechanism.py +3 -3
  2. sinabs/activation/surrogate_gradient_fn.py +4 -4
  3. sinabs/backend/dynapcnn/__init__.py +5 -4
  4. sinabs/backend/dynapcnn/chip_factory.py +33 -61
  5. sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
  6. sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
  7. sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
  8. sinabs/backend/dynapcnn/config_builder.py +39 -59
  9. sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
  10. sinabs/backend/dynapcnn/discretize.py +91 -156
  11. sinabs/backend/dynapcnn/dvs_layer.py +59 -101
  12. sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
  13. sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
  14. sinabs/backend/dynapcnn/dynapcnn_network.py +602 -326
  15. sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
  16. sinabs/backend/dynapcnn/exceptions.py +122 -3
  17. sinabs/backend/dynapcnn/io.py +51 -91
  18. sinabs/backend/dynapcnn/mapping.py +111 -75
  19. sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
  20. sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
  21. sinabs/backend/dynapcnn/utils.py +214 -459
  22. sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
  23. sinabs/conversion.py +2 -2
  24. sinabs/from_torch.py +23 -1
  25. sinabs/hooks.py +38 -41
  26. sinabs/layers/alif.py +16 -16
  27. sinabs/layers/crop2d.py +2 -2
  28. sinabs/layers/exp_leak.py +1 -1
  29. sinabs/layers/iaf.py +11 -11
  30. sinabs/layers/lif.py +9 -9
  31. sinabs/layers/neuromorphic_relu.py +9 -8
  32. sinabs/layers/pool2d.py +5 -5
  33. sinabs/layers/quantize.py +1 -1
  34. sinabs/layers/stateful_layer.py +10 -7
  35. sinabs/layers/to_spike.py +9 -9
  36. sinabs/network.py +14 -12
  37. sinabs/synopcounter.py +10 -7
  38. sinabs/utils.py +155 -7
  39. sinabs/validate_memory_speck.py +0 -5
  40. {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/METADATA +16 -6
  41. sinabs-3.1.0.dist-info/RECORD +65 -0
  42. {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/WHEEL +1 -1
  43. {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info/licenses}/AUTHORS +1 -0
  44. sinabs-3.1.0.dist-info/pbr.json +1 -0
  45. sinabs-3.0.4.dev2.dist-info/RECORD +0 -59
  46. sinabs-3.0.4.dev2.dist-info/pbr.json +0 -1
  47. {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info/licenses}/LICENSE +0 -0
  48. {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/top_level.txt +0 -0
sinabs/activation/reset_mechanism.py
@@ -6,7 +6,7 @@ from typing import Optional
 class MembraneReset:
     """Reset the membrane potential v_mem to a given value after it spiked.
 
-    Parameters:
+    Args:
         reset_value: fixed value that a neuron should be reset to. Defaults to zero.
 
     Example:
@@ -27,9 +27,9 @@ class MembraneReset:
 class MembraneSubtract:
     """Subtract the spiking threshold from the membrane potential for every neuron that spiked.
 
-    Parameters:
+    Args:
         subtract_value: optional value that will be subtracted from
-        v_mem if it spiked. Defaults to spiking threshold if None.
+            v_mem if it spiked. Defaults to spiking threshold if None.
 
     Example:
         >>> layer = sinabs.layers.LIF(reset_fn=MembraneSubtract(subtract_value=None), ...)
sinabs/activation/surrogate_gradient_fn.py
@@ -8,9 +8,9 @@ import torch
 class Heaviside:
     """Heaviside surrogate gradient with optional shift.
 
-    Parameters:
+    Args:
         window: Distance between step of Heaviside surrogate gradient and
-        threshold, relative to threshold.
+            threshold, relative to threshold.
     """
 
     window: float = 1.0
@@ -29,7 +29,7 @@ def gaussian(x: torch.Tensor, mu: float, sigma: float):
 class Gaussian:
     """Gaussian surrogate gradient function.
 
-    Parameters
+    Args:
         mu: The mean of the Gaussian.
         sigma: The standard deviation of the Gaussian.
         grad_scale: Scale the gradients arbitrarily.
@@ -52,7 +52,7 @@ class MultiGaussian:
 
     https://www.biorxiv.org/content/10.1101/2021.03.22.436372v2
 
-    Parameters
+    Args:
         mu: The mean of the Gaussian.
         sigma: The standard deviation of the Gaussian.
         h: Controls the magnitude of the negative parts of the kernel.
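
For illustration, these surrogate gradient functions are passed to sinabs spiking layers through the `surrogate_grad_fn` argument. A minimal sketch (the `tau_mem` value is arbitrary):

    from sinabs.activation.surrogate_gradient_fn import MultiGaussian
    from sinabs.layers import LIF

    # Select MultiGaussian as the surrogate gradient of an LIF layer;
    # Gaussian() or Heaviside(window=1.0) can be swapped in the same way.
    lif = LIF(tau_mem=20.0, surrogate_grad_fn=MultiGaussian())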
sinabs/backend/dynapcnn/__init__.py
@@ -1,5 +1,6 @@
-from .dynapcnn_network import (  # second one for compatibility purposes
-    DynapcnnCompatibleNetwork,
-    DynapcnnNetwork,
-)
+from .dvs_layer import DVSLayer
+from .dynapcnn_layer import DynapcnnLayer
+from .dynapcnn_network import DynapcnnCompatibleNetwork, DynapcnnNetwork
 from .dynapcnn_visualizer import DynapcnnVisualizer
+from .dynapcnnnetwork_module import DynapcnnNetworkModule
+from .nir_graph_extractor import GraphExtractor
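
With the reworked `__init__.py`, the new graph-based classes are importable directly from the subpackage (assuming the samna-dependent backend is installed):

    from sinabs.backend.dynapcnn import (
        DVSLayer,
        DynapcnnLayer,
        DynapcnnNetwork,
        DynapcnnNetworkModule,
        GraphExtractor,
    )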
sinabs/backend/dynapcnn/chip_factory.py
@@ -25,9 +25,8 @@ class ChipFactory:
     def __init__(self, device_str: str):
         """Factory class to access config builder and other device specific methods.
 
-        Parameters
-        ----------
-        device_str
+        Args:
+            device_str: name of the device
         """
         self.device_name, self.device_id = parse_device_id(device_str)
         if self.device_name not in self.supported_devices:
@@ -46,30 +45,19 @@ class ChipFactory:
     ) -> List:
         """Convert spike raster to events for DynapcnnNetworks.
 
-        Parameters
-        ----------
-
-        raster: torch.Tensor
-            A 4 dimensional tensor of spike events with the dimensions [Time, Channel, Height, Width]
-
-        layer: int
-            The index of the layer to route the events to
-
-        dt: float
-            Length of time step of the raster in seconds
-
-        truncate: bool
-            (default = False) Limit time-bins with more than one spikes to one spike.
-
-        delay_factor: float
-            (default = 0) Start simulation from this time. (in seconds)
-
-
-        Returns
-        -------
-
-        events: List[Spike]
-            A list of events that will be streamed to the device
+        Args:
+            raster (torch.Tensor): A 4 dimensional tensor of spike events with the
+                dimensions [Time, Channel, Height, Width]
+            layer (int): The index of the layer to route the events to
+            dt (float): Length of time step of the raster in seconds
+            truncate (bool): Limit time-bins with more than one spike to one spike.
+                Defaults to False.
+            delay_factor (float): Start simulation from this time (in seconds).
+                Defaults to zero.
+
+        Returns:
+            A list of events that will be streamed to the device.
+            Returned type is List[Spike].
         """
         assert delay_factor >= 0.0, print("Delay factor cannot be a negative value!")
         samna_module = self.get_config_builder().get_samna_module()
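
A hedged usage sketch of `raster_to_events`: the device string "speck2edevkit:0" is illustrative and requires the corresponding hardware to be attached.

    import torch

    from sinabs.backend.dynapcnn.chip_factory import ChipFactory

    factory = ChipFactory("speck2edevkit:0")

    # 100 time bins of 1 ms, 2 channels, 16x16 pixels.
    raster = (torch.rand(100, 2, 16, 16) < 0.01).float()

    # Route all events to layer 0; collapse multi-spike bins to single spikes.
    events = factory.raster_to_events(raster, layer=0, dt=1e-3, truncate=True)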
@@ -107,26 +95,16 @@ class ChipFactory:
         """Convert series of spikes in a structured array (eg. from aermanager) to events for
         DynapcnnDevKit.
 
-        Parameters
-        ----------
-
-        xytp: torch.Tensor
-            A numpy structured array with columns x, y, t(timestamp), p(polarity)
-
-        layer: int
-            The index of the layer to route the events to
-
-        reset_timestamps: Boolean
-            If set to True, timestamps will be aligned to start from 0
+        Args:
+            xytp (torch.Tensor): A numpy structured array with columns x, y, t(timestamp), p(polarity).
+            layer (int): The index of the layer to route the events to.
+            reset_timestamps (bool): If set to True, timestamps will be aligned to start from 0.
+            delay_factor (float): Start simulation from this time, in seconds.
+                Defaults to zero.
 
-        delay_factor: float
-            (default = 0) Start simulation from this time. (in seconds)
-
-        Returns
-        -------
-
-        events: List[Spike]
-            A list of events that will be streamed to the device
+        Returns:
+            A list of events that will be streamed to the device.
+            Returned type is List[Spike].
         """
 
         # Check delay factor as it being negative will crash the method.
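
Assuming the enclosing method is `ChipFactory.xytp_to_events`, a sketch with a hand-built structured array (field dtypes are illustrative; aermanager recordings provide the same fields), reusing `factory` from the sketch above:

    import numpy as np

    # Three events with x, y, t (timestamp) and p (polarity) fields.
    xytp = np.array(
        [(0, 0, 1000, True), (3, 2, 2000, False), (5, 5, 3000, True)],
        dtype=[("x", np.uint16), ("y", np.uint16), ("t", np.int64), ("p", bool)],
    )

    events = factory.xytp_to_events(xytp, layer=0, reset_timestamps=True)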
@@ -162,21 +140,15 @@ class ChipFactory:
         Convert events from DynapcnnNetworks to spike raster
         Note: Timestamp of first event will be considered as start time.
 
-        Parameters
-        ----------
-
-        events: List[Spike]
-            A list of events that will be streamed to the device
-        dt: float
-            Length of each time step for rasterization (in seconds)
-        shape: Optional[Tuple]
-            Shape of the raster to be produced, excluding the time dimension. (Channel, Height, Width)
-            If this is not specified, the shape is inferred based on the max values found in the events.
-
-        Returns
-        -------
-        raster: torch.Tensor
-            A 4 dimensional tensor of spike events with the dimensions [Time, Channel, Height, Width]
+        Args:
+            events (List[Spike]): A list of events that will be streamed to the device.
+            dt (float): Length of each time step for rasterization, in seconds.
+            shape (Optional[Tuple]): Shape of the raster to be produced, excluding the
+                time dimension (Channel, Height, Width). If this is not specified,
+                the shape is inferred based on the max values found in the events.
+
+        Returns:
+            A 4 dimensional tensor of spike events with the dimensions [Time, Channel, Height, Width].
         """
         # Timestamps are in microseconds
         timestamps = [event.timestamp for event in events]
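
Completing the round trip from the sketches above; `shape` matches the raster we started from, and omitting it infers the shape from the largest coordinates found in the events.

    # Rasterize device events back into a [Time, Channel, Height, Width] tensor;
    # the length of the time dimension follows from the event timestamps and dt.
    raster_out = factory.events_to_raster(events, dt=1e-3, shape=(2, 16, 16))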
sinabs/backend/dynapcnn/chips/dynapcnn.py
@@ -1,13 +1,13 @@
+import copy
+from typing import Dict, List
 from abc import abstractmethod
-from typing import List
 from warnings import warn
 
-import samna
 import torch
 import sinabs
 
 from sinabs.backend.dynapcnn.config_builder import ConfigBuilder
-from sinabs.backend.dynapcnn.dvs_layer import DVSLayer, expand_to_pair
+from sinabs.backend.dynapcnn.dvs_layer import DVSLayer
 from sinabs.backend.dynapcnn.dynapcnn_layer import DynapcnnLayer
 from sinabs.backend.dynapcnn.mapping import LayerConstraints
 
@@ -19,9 +19,8 @@ class DynapcnnConfigBuilder(ConfigBuilder):
         """
         Get the samna parent module that hosts all the appropriate sub-modules and classes.
 
-        Returns
-        -------
-        samna module
+        Returns:
+            samna module
         """
 
     @classmethod
@@ -32,17 +31,71 @@ class DynapcnnConfigBuilder(ConfigBuilder):
         """
         """
     @classmethod
-    def write_dvs_layer_config(cls, layer: DVSLayer, config: "DvsLayerConfig"):
+    @abstractmethod
+    def get_dvs_layer_config(cls):
+        """
+        Returns the DVS Layer configuration for the device type
+        """
+
+    @classmethod
+    def write_dvs_layer_config(
+        cls,
+        layer: DVSLayer,
+        layer2core_map: Dict[int, int],
+        destination_indices: List[int],
+        chip_layer: "DVSLayerConfig",
+    ) -> None:
+        """Write a DVS layer configuration to the conf object.
+
+        Uses the data in `layer` to configure a `DVSLayerConfig` to use the
+        chip's DVS camera.
+
+        Args:
+            layer (DVSLayer): Layer instance from which to generate the config.
+            layer2core_map (Dict): Keys are layer indices, values are
+                corresponding cores on hardware. Needed to map the destinations.
+            destination_indices (List): Indices of destination layers for `layer`.
+            chip_layer (DVSLayerConfig): Configuration object of the corresponding
+                on-chip core. Will be changed in-place based on `layer`.
+        """
         for param, value in layer.get_config_dict().items():
-            setattr(config, param, value)
+            setattr(chip_layer, param, value)
+
+        # Set destinations.
+        for dest_idx, dest in enumerate(destination_indices):
+            chip_layer.destinations[dest_idx].layer = layer2core_map[dest]
+            chip_layer.destinations[dest_idx].enable = True
+
+        chip_layer.pass_sensor_events = not layer.disable_pixel_array
+
+        if layer.merge_polarities:
+            chip_layer.merge = True
 
     @classmethod
-    def get_dynapcnn_layer_config_dict(cls, layer: DynapcnnLayer):
+    def get_dynapcnn_layer_config_dict(
+        cls,
+        layer: DynapcnnLayer,
+        layer2core_map: Dict[int, int],
+        destination_indices: List[int],
+    ) -> dict:
+        """Generate config dict from DynapcnnLayer instance
+
+        Args:
+            layer (DynapcnnLayer): Layer instance from which to generate the
+                config.
+            layer2core_map (Dict): Keys are layer indices, values are
+                corresponding cores on hardware. Needed to map the destinations.
+            destination_indices (List): Indices of destination layers for
+                `layer`.
+
+        Returns:
+            Dictionary that holds the information to configure the on-chip core.
+        """
         config_dict = {}
         config_dict["destinations"] = [{}, {}]
 
         # Update the dimensions
-        channel_count, input_size_y, input_size_x = layer.input_shape
+        channel_count, input_size_y, input_size_x = layer.in_shape
         dimensions = {"input_shape": {}, "output_shape": {}}
         dimensions["input_shape"]["size"] = {"x": input_size_x, "y": input_size_y}
         dimensions["input_shape"]["feature_count"] = channel_count
@@ -101,10 +154,6 @@ class DynapcnnConfigBuilder(ConfigBuilder):
                 "Unknown reset mechanism. Only MembraneReset and MembraneSubtract are currently understood."
             )
 
-        # if (not return_to_zero) and self.spk_layer.membrane_subtract != self.spk_layer.threshold:
-        #     warn(
-        #         "SpikingConv2dLayer: Subtraction of membrane potential is always by high threshold."
-        #     )
         if layer.spk_layer.min_v_mem is None:
             min_v_mem = -(2**15)
         else:
@@ -118,41 +167,73 @@ class DynapcnnConfigBuilder(ConfigBuilder):
                 "neurons_initial_value": neurons_state.int().tolist(),
             }
         )
-        # Update parameters from pooling
-        if layer.pool_layer is not None:
-            config_dict["destinations"][0]["pooling"] = expand_to_pair(
-                layer.pool_layer.kernel_size
-            )[0]
-            config_dict["destinations"][0]["enable"] = True
-        else:
-            pass
+
+        # Configure destinations
+        destinations = []
+        pooling_sizes = layer.pool
+        for dest_layer_id, pool in zip(destination_indices, pooling_sizes):
+            # Ignore exit point destinations
+            if dest_layer_id >= 0:
+                try:
+                    # Use scalar value for pooling
+                    pool = sinabs.utils.collapse_pair(pool)
+                except ValueError:
+                    raise ValueError(
+                        f"Can only do pooling with quadratic kernels. Received {pool}"
+                    )
+
+                dest_data = {
+                    "layer": layer2core_map[dest_layer_id],
+                    "enable": True,
+                    "pooling": pool,
+                }
+                destinations.append(dest_data)
+
+        config_dict["destinations"] = destinations
 
         return config_dict
 
     @classmethod
     def write_dynapcnn_layer_config(
-        cls, layer: DynapcnnLayer, chip_layer: "CNNLayerConfig"
-    ):
+        cls,
+        layer: DynapcnnLayer,
+        layer2core_map: Dict[int, int],
+        destination_indices: List[int],
+        chip_layer: "CNNLayerConfig",
+    ) -> None:
         """Write a single layer configuration to the dynapcnn conf object.
 
-        Parameters
-        ----------
-        layer:
-            The dynapcnn layer to write the configuration for
-        chip_layer: CNNLayerConfig
-            DYNAPCNN configuration object representing the layer to which
-            configuration is written.
+        Uses the data in `layer` to configure a `CNNLayerConfig` to be
+        deployed on chip.
+
+        Args:
+            layer (DynapcnnLayer): Layer instance from which to generate the
+                config.
+            layer2core_map (Dict): Keys are layer indices, values are
+                corresponding cores on hardware. Needed to map the destinations.
+            destination_indices (List): Indices of destination layers for `layer`.
+            chip_layer (CNNLayerConfig): Configuration object of the
+                corresponding on-chip core. Will be changed in-place based on
+                `layer`.
         """
-        config_dict = cls.get_dynapcnn_layer_config_dict(layer=layer)
-        # Update configuration of the DYNAPCNN layer
-        chip_layer.dimensions = config_dict["dimensions"]
-        config_dict.pop("dimensions")
-        for i in range(len(config_dict["destinations"])):
-            if "pooling" in config_dict["destinations"][i]:
-                chip_layer.destinations[i].pooling = config_dict["destinations"][i][
-                    "pooling"
-                ]
-        config_dict.pop("destinations")
+
+        # Extract from the DynapcnnLayer the config variables for its CNNLayerConfig.
+        config_dict = cls.get_dynapcnn_layer_config_dict(
+            layer=layer,
+            layer2core_map=layer2core_map,
+            destination_indices=destination_indices,
+        )
+
+        # Update configuration of the DYNAPCNN layer.
+        chip_layer.dimensions = config_dict.pop("dimensions")
+
+        # Set the destinations configuration.
+        for dest_idx, destination in enumerate(config_dict.pop("destinations")):
+            chip_layer.destinations[dest_idx].layer = destination["layer"]
+            chip_layer.destinations[dest_idx].enable = destination["enable"]
+            chip_layer.destinations[dest_idx].pooling = destination["pooling"]
+
+        # Set remaining configuration.
         for param, value in config_dict.items():
             try:
                 setattr(chip_layer, param, value)
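
To make the new destination handling concrete, a self-contained sketch of the bookkeeping above (plain Python, no samna required; the quadratic-kernel check mimics `sinabs.utils.collapse_pair`):

    layer2core_map = {0: 0, 1: 3}     # layer index -> chip core
    destination_indices = [1, -1]     # negative indices mark network exit points
    pooling_sizes = [(2, 2), (1, 1)]  # one pooling entry per destination

    destinations = []
    for dest_layer_id, pool in zip(destination_indices, pooling_sizes):
        if dest_layer_id >= 0:
            if pool[0] != pool[1]:
                raise ValueError(
                    f"Can only do pooling with quadratic kernels. Received {pool}"
                )
            destinations.append(
                {"layer": layer2core_map[dest_layer_id], "enable": True, "pooling": pool[0]}
            )

    print(destinations)  # [{'layer': 3, 'enable': True, 'pooling': 2}]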
@@ -160,36 +241,57 @@ class DynapcnnConfigBuilder(ConfigBuilder):
                raise TypeError(f"Unexpected parameter {param} or value. {e}")
 
     @classmethod
-    def build_config(cls, model: "DynapcnnNetwork", chip_layers: List[int]):
-        layers = model.sequence
+    def build_config(
+        cls,
+        layers: Dict[int, DynapcnnLayer],
+        layer2core_map: Dict[int, int],
+        destination_map: Dict[int, List[int]],
+    ) -> "DynapcnnConfiguration":
+        """Uses `DynapcnnLayer` objects to configure their equivalent chip cores.
+
+        Args:
+            layers (Dict): Keys are layer indices, values are DynapcnnLayer
+                instances.
+            layer2core_map (Dict): Keys are layer indices, values are
+                corresponding cores on hardware. Needed to map the destinations.
+            destination_map (Dict): Keys are layer indices, values are the
+                indices of that layer's destinations.
+
+        Returns:
+            Config object holding the information to configure the chip based
+            on the provided `layers`.
+        """
         config = cls.get_default_config()
-
-        has_dvs_layer = False
-        i_cnn_layer = 0  # Instantiate an iterator for the cnn cores
-        for i, chip_equivalent_layer in enumerate(layers):
-            if isinstance(chip_equivalent_layer, DVSLayer):
+        config.dvs_layer.pass_sensor_events = False
+
+        # Loop over layers in network and write corresponding configurations
+        for layer_index, ith_dcnnl in layers.items():
+            if isinstance(ith_dcnnl, DynapcnnLayer):
+                # Retrieve the config object of the core this layer is mapped to
+                chip_layer = config.cnn_layers[layer2core_map[layer_index]]
+                # Write core configuration.
+                cls.write_dynapcnn_layer_config(
+                    layer=ith_dcnnl,
+                    layer2core_map=layer2core_map,
+                    chip_layer=chip_layer,
+                    destination_indices=destination_map[layer_index],
+                )
+            elif isinstance(ith_dcnnl, DVSLayer):
+                # Uses the DVS camera.
                 chip_layer = config.dvs_layer
-                cls.write_dvs_layer_config(chip_equivalent_layer, chip_layer)
-                has_dvs_layer = True
-            elif isinstance(chip_equivalent_layer, DynapcnnLayer):
-                chip_layer = config.cnn_layers[chip_layers[i_cnn_layer]]
-                cls.write_dynapcnn_layer_config(chip_equivalent_layer, chip_layer)
-                i_cnn_layer += 1
-            else:
-                # in our generated network there is a spurious layer...
-                # should never happen
-                raise TypeError("Unexpected layer in the model")
-
-            if i == len(layers) - 1:
-                # last layer
-                chip_layer.destinations[0].enable = False
+                sw_layer = ith_dcnnl
+                destination_indices = destination_map[layer_index]
+                # Write camera configuration.
+                cls.write_dvs_layer_config(
+                    layer=sw_layer,
+                    layer2core_map=layer2core_map,
+                    destination_indices=destination_indices,
+                    chip_layer=chip_layer,
+                )
             else:
-                # Set destination layer
-                chip_layer.destinations[0].layer = chip_layers[i_cnn_layer]
-                chip_layer.destinations[0].enable = True
-
-        if not has_dvs_layer:
-            config.dvs_layer.pass_sensor_events = False
+                # Shouldn't happen since type checks are made previously.
+                raise TypeError(
+                    f"Layer (index {layer_index}) is unexpected in the model: \n{ith_dcnnl}"
+                )
 
         return config
 
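A sketch of the contract `build_config` now expects from its callers; the values are placeholders, and in practice `DynapcnnNetwork` assembles these mappings:

    # Every layer index must appear in all three mappings; negative destination
    # indices mark exit points and receive no on-chip destination.
    layer_indices = [0, 1]
    layer2core_map = {0: 0, 1: 3}
    destination_map = {0: [1], 1: [-1]}

    for idx in layer_indices:
        on_chip = [d for d in destination_map[idx] if d >= 0]
        print(f"layer {idx} -> core {layer2core_map[idx]}, on-chip destinations {on_chip}")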
@@ -233,29 +335,25 @@ class DynapcnnConfigBuilder(ConfigBuilder):
     def monitor_layers(cls, config, layers: List):
         """Updates the config object in place.
 
-        Parameters
-        ----------
-        config:
-            samna config object
-        monitor_chip_layers:
-            The layers to be monitored on the chip.
-
-        Returns
-        -------
-        config:
-            Returns the modified config. (The config object is modified in place)
+        Args:
+            config: samna config object.
+            layers: The layers to be monitored on the chip.
+
+        Returns:
+            The modified config. The config object is modified in place.
         """
         monitor_layers = layers.copy()
         if "dvs" in monitor_layers:
             config.dvs_layer.monitor_enable = True
             if config.dvs_layer.pooling.x != 1 or config.dvs_layer.pooling.y != 1:
                 warn(
-                    f"DVS layer has pooling and is being monitored. "
+                    "DVS layer has pooling and is being monitored. "
                     "Note that pooling will not be reflected in the monitored events."
                 )
             monitor_layers.remove("dvs")
         for lyr_indx in monitor_layers:
             config.cnn_layers[lyr_indx].monitor_enable = True
+
             if any(
                 dest.pooling != 1 for dest in config.cnn_layers[lyr_indx].destinations
             ):
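
A hedged example of the monitoring API (requires samna; `Speck2EConfigBuilder` comes from the speck2e changes below):

    from sinabs.backend.dynapcnn.chips.speck2e import Speck2EConfigBuilder

    config = Speck2EConfigBuilder.get_default_config()
    # Monitor the DVS output and chip core 0; the config is modified in place.
    Speck2EConfigBuilder.monitor_layers(config, ["dvs", 0])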
@@ -289,12 +387,10 @@ class DynapcnnConfigBuilder(ConfigBuilder):
     @classmethod
     def reset_states(cls, config, randomize=False):
         """
-        Parameters
-        ----------
-        config:
-            samna config object
-        randomize (bool):
-            If true, the states will be set to random initial values. Else, they will be set to zero
+        Args:
+            config: samna config object.
+            randomize (bool): If true, the states will be set to random initial values.
+                Else, they will be set to zero.
         """
         for idx, lyr in enumerate(config.cnn_layers):
             shape = torch.tensor(lyr.neurons_initial_value).shape
sinabs/backend/dynapcnn/chips/speck2e.py
@@ -1,3 +1,5 @@
+from typing import Dict
+
 import samna
 from samna.speck2e.configuration import SpeckConfiguration
 
@@ -16,6 +18,10 @@ class Speck2EConfigBuilder(DynapcnnConfigBuilder):
     def get_default_config(cls) -> "SpeckConfiguration":
         return SpeckConfiguration()
 
+    @classmethod
+    def get_dvs_layer_config(cls):
+        return SpeckConfiguration().DVSLayerConfig
+
     @classmethod
     def get_input_buffer(cls):
         return samna.BasicSourceNode_speck2e_event_speck2e_input_event()
@@ -23,8 +29,3 @@ class Speck2EConfigBuilder(DynapcnnConfigBuilder):
     @classmethod
     def get_output_buffer(cls):
         return samna.BasicSinkNode_speck2e_event_output_event()
-
-    @classmethod
-    def get_dynapcnn_layer_config_dict(cls, layer: DynapcnnLayer):
-        config_dict = super().get_dynapcnn_layer_config_dict(layer=layer)
-        return config_dict
sinabs/backend/dynapcnn/chips/speck2f.py
@@ -1,3 +1,5 @@
+from typing import Dict, List
+
 import samna
 from samna.speck2f.configuration import SpeckConfiguration
 
@@ -16,6 +18,10 @@ class Speck2FConfigBuilder(DynapcnnConfigBuilder):
     def get_default_config(cls) -> "SpeckConfiguration":
         return SpeckConfiguration()
 
+    @classmethod
+    def get_dvs_layer_config(cls):
+        return SpeckConfiguration().DVSLayerConfig
+
     @classmethod
     def get_input_buffer(cls):
         return samna.BasicSourceNode_speck2f_event_input_event()
@@ -23,8 +29,3 @@ class Speck2FConfigBuilder(DynapcnnConfigBuilder):
     @classmethod
     def get_output_buffer(cls):
         return samna.BasicSinkNode_speck2f_event_output_event()
-
-    @classmethod
-    def get_dynapcnn_layer_config_dict(cls, layer: DynapcnnLayer):
-        config_dict = super().get_dynapcnn_layer_config_dict(layer=layer)
-        return config_dict