sinabs 3.0.4.dev25__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (49)
  1. sinabs/activation/reset_mechanism.py +3 -3
  2. sinabs/activation/surrogate_gradient_fn.py +4 -4
  3. sinabs/backend/dynapcnn/__init__.py +5 -4
  4. sinabs/backend/dynapcnn/chip_factory.py +33 -61
  5. sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
  6. sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
  7. sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
  8. sinabs/backend/dynapcnn/config_builder.py +39 -59
  9. sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
  10. sinabs/backend/dynapcnn/discretize.py +91 -155
  11. sinabs/backend/dynapcnn/dvs_layer.py +59 -101
  12. sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
  13. sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
  14. sinabs/backend/dynapcnn/dynapcnn_network.py +602 -325
  15. sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
  16. sinabs/backend/dynapcnn/exceptions.py +122 -3
  17. sinabs/backend/dynapcnn/io.py +55 -92
  18. sinabs/backend/dynapcnn/mapping.py +111 -75
  19. sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
  20. sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
  21. sinabs/backend/dynapcnn/utils.py +214 -459
  22. sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
  23. sinabs/conversion.py +2 -2
  24. sinabs/from_torch.py +23 -1
  25. sinabs/hooks.py +38 -41
  26. sinabs/layers/alif.py +16 -16
  27. sinabs/layers/crop2d.py +2 -2
  28. sinabs/layers/exp_leak.py +1 -1
  29. sinabs/layers/iaf.py +11 -11
  30. sinabs/layers/lif.py +9 -9
  31. sinabs/layers/neuromorphic_relu.py +9 -8
  32. sinabs/layers/pool2d.py +5 -5
  33. sinabs/layers/quantize.py +1 -1
  34. sinabs/layers/stateful_layer.py +10 -7
  35. sinabs/layers/to_spike.py +9 -9
  36. sinabs/network.py +14 -12
  37. sinabs/nir.py +4 -3
  38. sinabs/synopcounter.py +10 -7
  39. sinabs/utils.py +155 -7
  40. sinabs/validate_memory_speck.py +0 -5
  41. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/METADATA +3 -2
  42. sinabs-3.1.1.dist-info/RECORD +65 -0
  43. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/AUTHORS +1 -0
  44. sinabs-3.1.1.dist-info/pbr.json +1 -0
  45. sinabs-3.0.4.dev25.dist-info/RECORD +0 -59
  46. sinabs-3.0.4.dev25.dist-info/pbr.json +0 -1
  47. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/WHEEL +0 -0
  48. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/LICENSE +0 -0
  49. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/top_level.txt +0 -0
@@ -1,174 +1,378 @@
  import time
- from subprocess import CalledProcessError
- from typing import List, Optional, Sequence, Tuple, Union
+ from pprint import pformat
+ from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+ from warnings import warn

  import samna
  import torch
  import torch.nn as nn
+ from torch import Tensor

  import sinabs
+ import sinabs.layers as sl

  from .chip_factory import ChipFactory
  from .dvs_layer import DVSLayer
  from .dynapcnn_layer import DynapcnnLayer
  from .io import disable_timestamps, enable_timestamps, open_device, reset_timestamps
+ from .nir_graph_extractor import GraphExtractor
  from .utils import (
- DEFAULT_IGNORED_LAYER_TYPES,
- build_from_list,
- convert_model_to_layer_list,
+ COMPLETELY_IGNORED_LAYER_TYPES,
+ IGNORED_LAYER_TYPES,
  infer_input_shape,
  parse_device_id,
  )
+ from .weight_rescaling_methods import rescale_method_1


  class DynapcnnNetwork(nn.Module):
- """Given a sinabs spiking network, prepare a dynapcnn-compatible network. This can be used to
- test the network will be equivalent once on DYNAPCNN. This class also provides utilities to
- make the dynapcnn configuration and upload it to DYNAPCNN.
-
- The following operations are done when converting to dynapcnn-compatible:
-
- * multiple avg pooling layers in a row are consolidated into one and \
- turned into sum pooling layers;
- * checks are performed on layer hyperparameter compatibility with dynapcnn \
- (kernel sizes, strides, padding)
- * checks are performed on network structure compatibility with dynapcnn \
- (certain layers can only be followed by other layers)
- * linear layers are turned into convolutional layers
- * dropout layers are ignored
- * weights, biases and thresholds are discretized according to dynapcnn requirements
-
- Note that the model parameters are only ever transferred to the device
- on the `to` call, so changing a threshold or weight of a model that
- is deployed will have no effect on the model on chip until `to` is called again.
- """
-
  def __init__(
  self,
- snn: Union[nn.Sequential, sinabs.Network],
+ snn: nn.Module,
  input_shape: Optional[Tuple[int, int, int]] = None,
- dvs_input: bool = False,
+ batch_size: Optional[int] = None,
+ dvs_input: Optional[bool] = None,
  discretize: bool = True,
+ weight_rescaling_fn: Callable = rescale_method_1,
  ):
- """
- DynapcnnNetwork: a class turning sinabs networks into dynapcnn
- compatible networks, and making dynapcnn configurations.
-
- Parameters
- ----------
- snn: sinabs.Network
- SNN that determines the structure of the `DynapcnnNetwork`
- input_shape: None or tuple of ints
- Shape of the input, convention: (features, height, width)
- If None, `snn` needs an InputLayer
- dvs_input: bool
- Does dynapcnn receive input from its DVS camera?
- discretize: bool
- If True, discretize the parameters and thresholds.
- This is needed for uploading weights to dynapcnn. Set to False only for
- testing purposes.
+ """Given a sinabs spiking network, prepare a dynapcnn-compatible network. This can be used to
+ test the network will be equivalent once on DYNAPCNN. This class also provides utilities to
+ make the dynapcnn configuration and upload it to DYNAPCNN.
+
+ Attributes:
+ snn (nn.Module): a implementing a spiking network.
+ input_shape (tuple or None): a description of the input dimensions
+ as `(features, height, width)`. If `None`, `snn` must contain a
+ `DVSLayer` instance, from which the input shape will be inferred.
+ batch_size (optional int): If `None`, will try to infer the batch
+ size from the model. If int value is provided, it has to match
+ the actual batch size of the model.
+ dvs_input (bool): optional (default as `None`). Wether or not dynapcnn
+ receive input from its DVS camera.
+ If a `DVSLayer` is part of `snn`...
+ ... and `dvs_input` is `False`, its `disable_pixel_array` attribute
+ will be set `True`. This means the DVS sensor will be configured
+ upon deployment but its output will not be sent as input
+ ... and `dvs_input` is `None`, the `disable_pixel_array` attribute
+ of the layer will not be changed.
+ ... and `dvs_input` is `True`, `disable_pixel_array` will be set
+ `False`, so that the DVS sensor data is sent to the network.
+ If no `DVSLayer` is part of `snn`...
+ ... and `dvs_input` is `False` or `None`, no `DVSLayer` will be added
+ and the DVS sensor will not be configured upon deployment.
+ ... and `dvs_input` is `True`, a `DVSLayer` instance will be added
+ to the network, with `disable_pixel_array` set to `False`.
+ discretize (bool): If `True`, discretize the parameters and thresholds.
+ This is needed for uploading weights to dynapcnn. Set to `False`
+ only for testing purposes.
+ weight_rescaling_fn (callable): a method that handles how the re-scaling
+ factor for one or more `SumPool2d` projecting to the same convolutional
+ layer are combined/re-scaled before applying them.
  """
  super().__init__()

- # This attribute stores the location/core-id of each of the DynapcnnLayers upon placement on chip
- self.chip_layers_ordering = []
+ if isinstance(snn, sinabs.Network):
+ # Ignore `analog_model` of sinabs `Network` instances
+ snn = snn.spiking_model
+
+ self.dvs_input = dvs_input
+ self.input_shape = infer_input_shape(snn, input_shape)
+ self._layer2core_map = None
+
+ # Infer batch size for dummy input to graph extractor
+ if batch_size is None:
+ batch_size = sinabs.utils.get_smallest_compatible_time_dimension(snn)
+ # computational graph from original PyTorch module.
+ self._graph_extractor = GraphExtractor(
+ snn,
+ torch.randn((batch_size, *self.input_shape)),
+ self.dvs_input,
+ ignore_node_types=COMPLETELY_IGNORED_LAYER_TYPES,
+ )
+
+ # Remove nodes of ignored classes (including merge nodes)
+ # Other than `COMPLETELY_IGNORED_LAYER_TYPES`, `IGNORED_LAYER_TYPES` are
+ # part of the graph initially and are needed to ensure proper handling of
+ # graph structure (e.g. Merge nodes) or meta-information (e.g.
+ # `nn.Flatten` for io-shapes)
+ self._graph_extractor.remove_nodes_by_class(IGNORED_LAYER_TYPES)

- self.input_shape = input_shape # Convert models to sequential
- layers = convert_model_to_layer_list(
- model=snn, ignore=DEFAULT_IGNORED_LAYER_TYPES
+ # Module to execute forward pass through network
+ self._dynapcnn_module = self._graph_extractor.get_dynapcnn_network_module(
+ discretize=discretize, weight_rescaling_fn=weight_rescaling_fn
  )
- # Check if dvs input is expected
- if dvs_input:
- self.dvs_input = True
- else:
- self.dvs_input = False
+ self._dynapcnn_module.setup_dynapcnnlayer_graph(index_layers_topologically=True)
+
+ @property
+ def all_layers(self):
+ return self._dynapcnn_module.all_layers
+
+ @property
+ def dvs_node_info(self):
+ return self._dynapcnn_module.dvs_node_info

- input_shape = infer_input_shape(layers, input_shape=input_shape)
- assert len(input_shape) == 3, "infer_input_shape did not return 3-tuple"
+ @property
+ def dvs_layer(self):
+ return self._dynapcnn_module.dvs_layer

- # Build model from layers
- self.sequence = build_from_list(
- layers,
- in_shape=input_shape,
- discretize=discretize,
- dvs_input=self.dvs_input,
+ @property
+ def chip_layers_ordering(self):
+ warn(
+ "`chip_layers_ordering` is deprecated. Returning `layer2core_map` instead.",
+ DeprecationWarning,
  )
+ return self._layer2core_map

- def to(
- self,
- device="cpu",
- chip_layers_ordering="auto",
- monitor_layers: Optional[Union[List, str]] = None,
- config_modifier=None,
- slow_clk_frequency: int = None,
- ):
- """Note that the model parameters are only ever transferred to the device on the `to` call,
- so changing a threshold or weight of a model that is deployed will have no effect on the
- model on chip until `to` is called again.
+ @property
+ def dynapcnn_layers(self):
+ return self._dynapcnn_module.dynapcnn_layers
+
+ @property
+ def dynapcnn_module(self):
+ return self._dynapcnn_module
+
+ @property
+ def exit_layers(self):
+ return [self.all_layers[i] for i in self._dynapcnn_module.get_exit_layers()]
+
+ @property
+ def exit_layer_ids(self):
+ return self._dynapcnn_module.get_exit_layers()
+
+ @property
+ def is_deployed_on_dynapcnn_device(self):
+ return (
+ hasattr(self, "device")
+ and parse_device_id(self.device)[0] in ChipFactory.supported_devices
+ )
+
+ @property
+ def layer_destination_map(self):
+ return self._dynapcnn_module.destination_map
+
+ @property
+ def layer2core_map(self):
+ return self._layer2core_map
+
+ @property
+ def name_2_indx_map(self):
+ return self._graph_extractor.name_2_indx_map
+
+ def hw_forward(self, x):
+ """Forwards data through the chip."""

- Parameters
- ----------
+ # flush buffer.
+ _ = self.samna_output_buffer.get_events()

- device: String
- cpu:0, cuda:0, speck2edevkit
+ # Reset and enable timestamp
+ reset_timestamps(self.device)
+ enable_timestamps(self.device)

- chip_layers_ordering: sequence of integers or `auto`
- The order in which the dynapcnn layers will be used. If `auto`,
- an automated procedure will be used to find a valid ordering.
- A list of layers on the device where you want each of the model's DynapcnnLayers to be placed.
- The index of the core on chip to which the i-th layer in the model is mapped is the value of the i-th entry in the list.
- Note: This list should be the same length as the number of dynapcnn layers in your model.
+ # send input.
+ self.samna_input_buffer.write(x)
+ received_evts = []

- monitor_layers: None/List
- A list of all layers in the module that you want to monitor. Indexing starts with the first non-dvs layer.
- If you want to monitor the dvs layer for eg.
- ::
+ # record at least until the last event has been replayed.
+ min_duration = max(event.timestamp for event in x) * 1e-6
+ time.sleep(min_duration)

- monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
- monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
- monitor_layers = "all" # If you want to monitor all the layers
+ # keep recording if more events are being registered.
+ while True:
+ prev_length = len(received_evts)
+ time.sleep(0.1)
+ received_evts.extend(self.samna_output_buffer.get_events())
+ if prev_length == len(received_evts):
+ break

- config_modifier:
- A user configuration modifier method.
- This function can be used to make any custom changes you want to make to the configuration object.
+ # disable timestamp
+ disable_timestamps(self.device)
+
+ return received_evts
+
+ def forward(
+ self, x, return_complete: bool = False
+ ) -> Union[List["event"], Tensor, Dict[int, Dict[int, Tensor]]]:
+ """Forwards data through the `DynapcnnNetwork` instance.
+
+ If the network has been deployed on a Dynapcnn/Speck device the forward
+ pass happens on the devices. Otherwise the device will be simulated by
+ passing the data through the `DynapcnnLayer` instances.
+
+ Args:
+ x: Tensor that serves as input to network. Is passed to all layers
+ that are marked as entry points
+ return_complete: bool that indicates whether all layer outputs should
+ be return or only those with no further destinations (default)
+
+ Returns:
+ The returned object depends on whether the network has been deployed
+ on chip. If this is the case, a flat list of samna events is returned,
+ in the order in which the events have been collected.
+ If the data is passed through the `DynapcnnLayer` instances, the output
+ depends on `return_complete` and on the network configuration:
+ * If `return_complete` is `True`, all layer outputs will be returned in a
+ dict, with layer indices as keys, and nested dicts as values, which
+ hold destination indices as keys and output tensors as values.
+ * If `return_complete` is `False` and there is only a single destination
+ in the whole network that is marked as final (i.e. destination
+ index in dynapcnn layer handler is negative), it will return the
+ output as a single tensor.
+ * If `return_complete` is `False` and no destination in the network
+ is marked as final, a warning will be raised and the function
+ returns an empty dict.
+ * In all other cases a dict will be returned that is of the same
+ structure as if `return_complete` is `True`, but only with entries
+ where the destination is marked as final.
+ """
+ if self.is_deployed_on_dynapcnn_device:
+ return self.hw_forward(x)
+ else:
+ # Forward pass through software DynapcnnLayer instance
+ return self.dynapcnn_module(x, return_complete=return_complete)
+
+ def parameters(self) -> list:
+ """Gathers all the parameters of the network in a list. This is done by accessing the convolutional layer in each `DynapcnnLayer`,
+ calling its `.parameters` method and saving it to a list.
+
+ Note: the method assumes no biases are used.
+
+ Returns:
+ List of parameters of all convolutional layers in the `DynapcnnNetwok`.
+ """
+ parameters = []
+
+ for layer in self.dynapcnn_layers.values():
+ if isinstance(layer, DynapcnnLayer):
+ parameters.extend(layer.conv_layer.parameters())
+
+ return parameters
+
+ def memory_summary(self) -> Dict[str, Dict[int, int]]:
+ """Get a summary of the network's memory requirements.
+
+ Returns:
+ A dictionary with keys kernel, neuron, bias. The values are a dicts.
+ Each nested dict has as keys the indices of all dynapcnn_layers and
+ as values the corresonding memory values for each layer.
+ """
+ # For each entry (kernel, neuron, bias) provide one nested dict with
+ # one entry for each layer
+ summary = {key: dict() for key in ("kernel", "neuron", "bias")}
+
+ for layer_index, layer in self.dynapcnn_layers.items():
+ for key, val in layer.memory_summary().items():
+ summary[key][layer_index] = val
+
+ return summary

- Note
+ def init_weights(self, init_fn: nn.init = nn.init.xavier_normal_) -> None:
+ """Call the weight initialization method `init_fn` on each `DynapcnnLayer.conv_layer.weight.data` in the `DynapcnnNetwork` instance.
+
+ Args:
+ init_fn (torch.nn.init): the weight initialization method to be used.
+ """
+ for layer in self.dynapcnn_layers.values():
+ if isinstance(layer, DynapcnnLayer):
+ init_fn(layer.conv_layer.weight.data)
+
+ def detach_neuron_states(self) -> None:
+ """Detach the neuron states and activations from current computation graph (necessary)."""
+
+ for module in self.dynapcnn_layers.values():
+ if isinstance(module, DynapcnnLayer):
+ if isinstance(module.spk_layer, sl.StatefulLayer):
+ for name, buffer in module.spk_layer.named_buffers():
+ buffer.detach_()
+
+ def to(
+ self,
+ device: str = "cpu",
+ monitor_layers: Optional[Union[List, str]] = None,
+ config_modifier: Optional[Callable] = None,
+ slow_clk_frequency: Optional[int] = None,
+ layer2core_map: Union[Dict[int, int], str] = "auto",
+ chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+ ):
+ """Deploy model to cpu, gpu or a SynSense device.
+
+ Note that the model parameters are only ever transferred to the device on the `to` call,
+ so changing a threshold or weight of a model that is deployed will have no effect on the
+ model on chip until `to` is called again.
+
+ Args:
+ device (str): cpu:0, cuda:0, speck2edevkit
+ monitor_layers: None/List. A list of all layers in the module that
+ you want to monitor. Indexing starts with the first non-dvs
+ layer. If you want to monitor the dvs layer for eg.
+ ::
+
+ monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+ monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+ monitor_layers = "all" # If you want to monitor all the layers
+ monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+ config_modifier: A user configuration modifier method. This function
+ can be used to make any custom changes you want to make to the configuration object.
+ layer2core_map (dict or "auto"): Defines how cores on chip are
+ assigned to DynapcnnLayers. If `auto`, an automated procedure
+ will be used to find a valid ordering. Otherwise a dict needs
+ to be passed, with DynapcnnLayer indices as keys and assigned
+ core IDs as values. DynapcnnLayer indices have to match those of
+ `self.dynapcnn_layers`.
+ chip_layers_ordering: sequence of integers or `auto`. The order in
+ which the dynapcnn layers will be used. If `auto`,an automated
+ procedure will be used to find a valid ordering. A list of
+ layers on the device where you want each of the model's
+ DynapcnnLayers to be placed.
+ The index of the core on chip to which the i-th layer in the
+ model is mapped is the value of the i-th entry in the list.
+ Note: This list should be the same length as the number of
+ dynapcnn layers in your model.
+ Note: This parameter is obsolete and should not be passed
+ anymore. Use `layer2core_map` instead.
+
+ Note
  ----
  chip_layers_ordering and monitor_layers are used only when using synsense devices.
  For GPU or CPU usage these options are ignored.
  """
  self.device = device
+
  if isinstance(device, torch.device):
- return super().to(device)
+ self._to_device(device)
+
  elif isinstance(device, str):
  device_name, _ = parse_device_id(device)
- if device_name in ChipFactory.supported_devices: # pragma: no cover
- # Generate config
+
+ if device_name in ChipFactory.supported_devices:
+ # generate config.
  config = self.make_config(
+ layer2core_map=layer2core_map,
  chip_layers_ordering=chip_layers_ordering,
  device=device,
  monitor_layers=monitor_layers,
  config_modifier=config_modifier,
  )

- # Apply configuration to device
+ # apply configuration to device
  self.samna_device = open_device(device)
  self.samna_device.get_model().apply_configuration(config)
  time.sleep(1)

- # Set external slow-clock if need
+ # set external slow-clock if needed
  if slow_clk_frequency is not None:
  dk_io = self.samna_device.get_io_module()
  dk_io.set_slow_clk(True)
  dk_io.set_slow_clk_rate(slow_clk_frequency) # Hz

  builder = ChipFactory(device).get_config_builder()
- # Create input source node
+
+ # create input source node
  self.samna_input_buffer = builder.get_input_buffer()
- # Create output sink node node
+
+ # create output sink node node.
  self.samna_output_buffer = builder.get_output_buffer()

- # Connect source node to device sink
+ # connect source node to device sink.
  self.device_input_graph = samna.graph.EventFilterGraph()
  self.device_input_graph.sequential(
  [
@@ -177,7 +381,7 @@ class DynapcnnNetwork(nn.Module):
  ]
  )

- # Connect sink node to device
+ # connect sink node to device.
  self.device_output_graph = samna.graph.EventFilterGraph()
  self.device_output_graph.sequential(
  [
@@ -185,108 +389,18 @@ class DynapcnnNetwork(nn.Module):
  self.samna_output_buffer,
  ]
  )
+
  self.device_input_graph.start()
  self.device_output_graph.start()
  self.samna_config = config
- return self
- else:
- return super().to(device)
- else:
- raise Exception("Unknown device description.")
-
- def _make_config(
- self,
- chip_layers_ordering: Union[Sequence[int], str] = "auto",
- device="speck2edevkit:0",
- monitor_layers: Optional[Union[List, str]] = None,
- config_modifier=None,
- ) -> Tuple["SamnaConfiguration", bool]:
- """Prepare and output the `samna` configuration for this network.
-
- Parameters
- ----------
-
- chip_layers_ordering: sequence of integers or `auto`
- The order in which the dynapcnn layers will be used. If `auto`,
- an automated procedure will be used to find a valid ordering.
- A list of layers on the device where you want each of the model's DynapcnnLayers to be placed.
- The index of the core on chip to which the i-th layer in the model is mapped is the value of the i-th entry in the list.
- Note: This list should be the same length as the number of dynapcnn layers in your model.
-
- device: String
- speck2edevkit or speck2fdevkit
-
- monitor_layers: None/List/Str
- A list of all layers in the module that you want to monitor. Indexing starts with the first non-dvs layer.
- If you want to monitor the dvs layer for eg.
- ::

- monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
- monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
- monitor_layers = "all" # If you want to monitor all the layers
-
- If this value is left as None, by default the last layer of the model is monitored.
-
- config_modifier:
- A user configuration modifier method.
- This function can be used to make any custom changes you want to make to the configuration object.
-
- Returns
- -------
- Configuration object
- Object defining the configuration for the device
- Bool
- True if the configuration is valid for the given device.
-
- Raises
- ------
- ImportError
- If samna is not available.
- """
- config_builder = ChipFactory(device).get_config_builder()
+ return self

- has_dvs_layer = isinstance(self.sequence[0], DVSLayer)
+ else:
+ self._to_device(device)

- # Figure out layer ordering
- if chip_layers_ordering == "auto":
- chip_layers_ordering = config_builder.get_valid_mapping(self)
  else:
- # Truncate chip_layers_ordering just in case a longer list is passed
- if has_dvs_layer:
- chip_layers_ordering = chip_layers_ordering[: len(self.sequence) - 1]
- chip_layers_ordering = chip_layers_ordering[: len(self.sequence)]
-
- # Save the chip layers
- self.chip_layers_ordering = chip_layers_ordering
- # Update config
- config = config_builder.build_config(self, chip_layers_ordering)
- if self.input_shape and self.input_shape[0] == 1:
- config.dvs_layer.merge = True
- # Check if any monitoring is enabled and if not, enable monitoring for the last layer
- if monitor_layers is None:
- monitor_layers = [-1]
- elif monitor_layers == "all":
- num_cnn_layers = len(self.sequence) - int(has_dvs_layer)
- monitor_layers = list(range(num_cnn_layers))
-
- # Enable monitors on the specified layers
- # Find layers corresponding to the chip
- monitor_chip_layers = [
- self.find_chip_layer(lyr) for lyr in monitor_layers if lyr != "dvs"
- ]
- if "dvs" in monitor_layers:
- monitor_chip_layers.append("dvs")
- config_builder.monitor_layers(config, monitor_chip_layers)
-
- # Fix default factory setting to not return input events (UGLY!! Ideally this should happen in samna)
- # config.factory_settings.monitor_input_enable = False
-
- # Apply user config modifier
- if config_modifier is not None:
- config = config_modifier(config)
-
- # Validate config
- return config, config_builder.validate_configuration(config)
+ raise Exception("Unknown device description.")

  def make_config(
  self,
@@ -297,44 +411,34 @@ class DynapcnnNetwork(nn.Module):
  ):
  """Prepare and output the `samna` DYNAPCNN configuration for this network.

- Parameters
- ----------
-
- chip_layers_ordering: sequence of integers or `auto`
- The order in which the dynapcnn layers will be used. If `auto`,
- an automated procedure will be used to find a valid ordering.
- A list of layers on the device where you want each of the model's DynapcnnLayers to be placed.
- Note: This list should be the same length as the number of dynapcnn layers in your model.
-
- device: String
- speck2edevkit or speck2fdevkit
-
- monitor_layers: None/List/Str
- A list of all layers in the module that you want to monitor. Indexing starts with the first non-dvs layer.
- If you want to monitor the dvs layer for eg.
- ::
-
- monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
- monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
- monitor_layers = "all" # If you want to monitor all the layers
-
- If this value is left as None, by default the last layer of the model is monitored.
-
- config_modifier:
- A user configuration modifier method.
- This function can be used to make any custom changes you want to make to the configuration object.
+ Args:
+ chip_layers_ordering: sequence of integers or `auto`. The order in
+ which the dynapcnn layers will be used. If `auto`, an automated
+ procedure will be used to find a valid ordering. A list of
+ layers on the device where you want each of the model's
+ DynapcnnLayers to be placed. Note: This list should be the same
+ length as the number of dynapcnn layers in your model.
+ device (str): speck2edevkit or speck2fdevkit
+ monitor_layers: A list of all layers in the module that you want to
+ monitor. Indexing starts with the first non-dvs layer. If you
+ want to monitor the dvs layer for eg.
+ ::
+
+ monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+ monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+ monitor_layers = "all" # If you want to monitor all the layers
+
+ If this value is left as None, by default the last layer of the model is monitored.
+ config_modifier: A user configuration modifier method. This
+ function can be used to make any custom changes you want to
+ make to the configuration object.

- Returns
- -------
- Configuration object
- Object defining the configuration for the device
+ Returns:
+ Object defining the configuration for the device.

- Raises
- ------
- ImportError
- If samna is not available.
- ValueError
- If the generated configuration is not valid for the specified device.
+ Raises:
+ ImportError: If samna is not available.
+ ValueError: If the generated configuration is not valid for the specified device.
  """
  config, is_compatible = self._make_config(
  chip_layers_ordering=chip_layers_ordering,
@@ -367,10 +471,104 @@ class DynapcnnNetwork(nn.Module):
  raise e
  return is_compatible

+ def make_config(
+ self,
+ layer2core_map: Union[Dict[int, int], str] = "auto",
+ device: str = "speck2fdevkit:0",
+ monitor_layers: Optional[Union[List, str]] = None,
+ config_modifier: Optional[Callable] = None,
+ chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+ ):
+ """Prepare and output the `samna` DYNAPCNN configuration for this network.
+
+ Args:
+ layer2core_map (dict or "auto"): Defines how cores on chip are
+ assigned to DynapcnnLayers. If `auto`, an automated procedure
+ will be used to find a valid ordering. Otherwise a dict needs
+ to be passed, with DynapcnnLayer indices as keys and assigned
+ core IDs as values. DynapcnnLayer indices have to match those of
+ `self.dynapcnn_layers`.
+ device: (string): speck2devkit
+ monitor_layers: A list of all layers in the module that you want to
+ monitor. Indexing starts with the first non-dvs layer. If you
+ want to monitor the dvs layer for eg.
+ ::
+
+ monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+ monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+ monitor_layers = "all" # If you want to monitor all the layers
+ monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+
+ If this value is left as None, by default the last layer of the
+ model is monitored.
+ config_modifier (Callable or None): A user configuration modifier
+ method. This function can be used to make any custom changes
+ you want to make to the configuration object.
+ chip_layers_ordering (None, sequence of integers or "auto", obsolete):
+ The order in which the dynapcnn layers will be used. If `auto`,
+ an automated procedure will be used to find a valid ordering.
+ A list of layers on the device where you want each of the model's
+ DynapcnnLayers to be placed. Note: This list should be the same
+ length as the number of dynapcnn layers in your model. Note:
+ This parameter is obsolete and should not be passed anymore.
+ Use `layer2core_map` instead.
+
+ Returns:
+ Object defining the configuration for the device
+
+ Raises:
+ ImportError: If samna is not available.
+ ValueError: If the generated configuration is not valid for the specified device.
+ """
+ config, is_compatible = self._make_config(
+ layer2core_map=layer2core_map,
+ device=device,
+ monitor_layers=monitor_layers,
+ config_modifier=config_modifier,
+ chip_layers_ordering=chip_layers_ordering,
+ )
+
+ # Validate config
+ if is_compatible:
+ print("Network is valid")
+ return config
+ else:
+ raise ValueError(
+ f"Generated config is not valid for {device}. "
+ "Probably one or more layers are too large. Try "
+ "Reducing the number of neurons or the kernel sizes."
+ )
+
+ def has_dvs_layer(self) -> bool:
+ """Return True if there is a DVSLayer in the network
+
+ Returns:
+ True if DVSLayer is found within the network.
+ """
+ return self.dvs_layer is not None
+
+ def zero_grad(self, set_to_none: bool = False) -> None:
+ """Call `zero_grad` method of each DynapCNN layer
+
+ Args:
+ set_to_none (bool): This argument is passed directly to the
+ `zero_grad` method of each DynapCNN layer
+ """
+ for lyr in self.dynapcnn_layers.values():
+ lyr.zero_grad(set_to_none)
+
  def reset_states(self, randomize=False):
- """Reset the states of the network."""
+ """Reset the states of the network.
+ Note that setting `randomize` to `True` is only supported for models
+ that have not yet been deployed on a SynSense device.
+
+ Args:
+ randomize (bool): If `False` (default), will set all states to 0.
+ Otherwise will set to random values.
+ """
  if hasattr(self, "device") and isinstance(self.device, str): # pragma: no cover
  device_name, _ = parse_device_id(self.device)
+ # Reset states on SynSense device
  if device_name in ChipFactory.supported_devices:
  config_builder = ChipFactory(self.device).get_config_builder()
  # Set all the vmem states in the samna config to zero
@@ -390,118 +588,197 @@ class DynapcnnNetwork(nn.Module):
  time.sleep(0.1)
  self.samna_input_graph.start()
  return
+
+ # Reset states of `DynapcnnLayer` instances
  for layer in self.sequence:
  if isinstance(layer, DynapcnnLayer):
  layer.spk_layer.reset_states(randomize=randomize)

- def find_chip_layer(self, layer_idx):
- """Given an index of a layer in the model, find the corresponding cnn core id where it is
- placed.
+ def _make_config(
+ self,
+ layer2core_map: Union[Dict[int, int], str] = "auto",
+ device: str = "speck2fdevkit:0",
+ monitor_layers: Optional[Union[List, str]] = None,
+ config_modifier: Optional[Callable] = None,
+ chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+ ) -> Tuple["SamnaConfiguration", bool]:
+ """Prepare and output the `samna` DYNAPCNN configuration for this network.

- > Note that the layer index does not include the DVSLayer.
- > For instance your model comprises two layers [DVSLayer, DynapcnnLayer],
- > then the index of DynapcnnLayer is 0 and not 1.
+ Args:
+ layer2core_map (dict or "auto"): Defines how cores on chip are
+ assigned to DynapcnnLayers. If `auto`, an automated procedure
+ will be used to find a valid ordering. Otherwise a dict needs
+ to be passed, with DynapcnnLayer indices as keys and assigned
+ core IDs as values. DynapcnnLayer indices have to match those
+ of `self.dynapcnn_layers`.
+ device: (string): dynapcnndevkit, speck2b or speck2devkit
+ monitor_layers: A list of all layers in the module that you want
+ to monitor. Indexing starts with the first non-dvs layer.
+ If you want to monitor the dvs layer for eg.
+ ::
+
+ monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+ monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+ monitor_layers = "all" # If you want to monitor all the layers
+ monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+
+ If this value is left as None, by default the last layer of the model is monitored.
+
+ config_modifier (Callable or None): A user configuration modifier
+ method. This function can be used to make any custom changes
+ you want to make to the configuration object.
+ chip_layers_ordering (None, sequence of integers or "auto", obsolete):
+ The order in which the dynapcnn layers will be used. If `auto`,
+ an automated procedure will be used to find a valid ordering.
+ A list of layers on the device where you want each of the
+ model's DynapcnnLayers to be placed. Note: This list should be
+ the same length as the number of dynapcnn layers in your model.
+ Note: This parameter is obsolete and should not be passed
+ anymore. Use `layer2core_map` instead.

- Parameters
- ----------
- layer_idx: int
- Index of a layer
+ Returns:
+ An object defining the configuration for the device and a boolean
+ that determines if the configuration is valid for the given device.

- Returns
- -------
- chip_lyr_idx: int
- Index of the layer on the chip where the model layer is placed.
+ Raises:
+ ImportError: If samna is not available.
+ ValueError: If no valid mapping between the layers of this object
+ and the cores ofthe provided device can be found.
  """
- # Compute the expected number of cores
- num_cores_required = len(self.sequence)
- if isinstance(self.sequence[0], DVSLayer):
- num_cores_required -= 1
- if len(self.chip_layers_ordering) != num_cores_required:
- raise Exception(
- f"Number of layers specified in chip_layers_ordering {self.chip_layers_ordering} does not correspond to the number of cores required for this model {num_cores_required}"
- )
-
- return self.chip_layers_ordering[layer_idx]
+ config_builder = ChipFactory(device).get_config_builder()

- def forward(self, x):
- if (
- hasattr(self, "device")
- and parse_device_id(self.device)[0] in ChipFactory.supported_devices
- ): # pragma: no cover
- _ = self.samna_output_buffer.get_events() # Flush buffer
- # NOTE: The code to start and stop time stamping is device specific
- reset_timestamps(self.device)
- enable_timestamps(self.device)
- # Send input
- self.samna_input_buffer.write(x)
- received_evts = []
-
- # Wait a minimum time to guarantee the events were played
- time.sleep(1)
-
- # Keep recording if more events are being registered
- while True:
- prev_length = len(received_evts)
- time.sleep(0.1)
- received_evts.extend(self.samna_output_buffer.get_events())
- if prev_length == len(received_evts):
- break
- # Disable timestamp
- disable_timestamps(self.device)
- return received_evts
+ if chip_layers_ordering is not None:
+ if layer2core_map != "auto":
+ warn(
+ "Both `chip_layers_ordering` and `layer2core_map are provided. "
+ "The parameter `chip_layers_ordering` is deprecated and will "
+ "be ignored.",
+ DeprecationWarning,
+ )
+ elif chip_layers_ordering == "auto":
+ warn(
+ "The parameter `chip_layers_ordering` is deprecated. Passing "
+ "'auto' is still accepted, but in the future please use "
+ "`layer2core_map` instead.",
+ DeprecationWarning,
+ )
+ else:
+ layer2core_map = {
+ idx: core
+ for idx, core in zip(self.dynapcnn_layers, chip_layers_ordering)
+ }
+ warn(
+ "The parameter `chip_layers_ordering` is deprecated. "
+ "Because `layer2core_map` is 'auto', and `chip_layers_ordering` "
+ "is not, will convert `chip_layers_ordering` to a "
+ "dict matching `layer2core_map`. In the future please use "
+ "`layer2core_map` instead. Please make sure the inferred "
+ "mapping from DynapcnnLayer index to core index is correct: "
+ + pformat(layer2core_map),
+ DeprecationWarning,
+ )
+ if layer2core_map == "auto":
+ # Assign chip core ID for each DynapcnnLayer.
+ layer2core_map = config_builder.map_layers_to_cores(self.dynapcnn_layers)
  else:
- """Torch's forward pass."""
- return self.sequence(x)
+ if not layer2core_map.keys() == self.dynapcnn_layers.keys():
+ raise ValueError(
+ "The keys provided in `layer2core_map` must exactly match "
+ "the keys in `self.dynapcnn_layers`"
+ )

- def memory_summary(self):
- """Get a summary of the network's memory requirements.
+ self._layer2core_map = layer2core_map

- Returns
- -------
- dict:
- A dictionary with keys kernel, neuron, bias.
- The values are a list of the corresponding number per layer in the same order as the model
- """
- summary = {}
-
- dynapcnn_layers = [
- lyr for lyr in self.sequence if isinstance(lyr, DynapcnnLayer)
- ]
- summary.update({k: list() for k in dynapcnn_layers[0].memory_summary().keys()})
- for lyr in dynapcnn_layers:
- lyr_summary = lyr.memory_summary()
- for k, v in lyr_summary.items():
- summary[k].append(v)
- return summary
+ # update config (config. DynapcnnLayer instances into their assigned core).
+ config = config_builder.build_config(
+ layers=self.all_layers,
+ layer2core_map=layer2core_map,
+ destination_map=self.layer_destination_map,
+ )

- def zero_grad(self, set_to_none: bool = False) -> None:
- for lyr in self.sequence:
- lyr.zero_grad(set_to_none)
+ if monitor_layers is None:
+ # Monitor all layers with exit point destinations
+ monitor_layers = self._dynapcnn_module.get_exit_layers()
+ elif monitor_layers == "all":
+ monitor_layers = [
+ lyr_idx
+ for lyr_idx, layer in self.dynapcnn_layers.items()
+ if not isinstance(layer, DVSLayer)
+ ]
+ elif -1 in monitor_layers:
+ # Replace `-1` with exit layer IDs
+ monitor_layers.remove(-1)
+ monitor_layers += self._dynapcnn_module.get_exit_layers()
+
+ # Collect cores (chip layers) that are to be monitored
+ monitor_chip_layers = []
+ for lyr_idx in monitor_layers:
+ if str(lyr_idx).lower() == "dvs":
+ monitor_chip_layers.append("dvs")
+ else:
+ monitor_chip_layers.append(layer2core_map[lyr_idx])

- def __del__(self):
- # Stop the input graph
- if hasattr(self, "device_input_graph") and self.device_input_graph:
- self.device_input_graph.stop()
+ # enable monitors on the specified layers
+ config_builder.monitor_layers(config, monitor_chip_layers)
+
+ if config_modifier is not None:
+ # apply user config modifier.
+ config = config_modifier(config)
+
+ # Validate config
+ return config, config_builder.validate_configuration(config)

- # Stop the output graph.
- if hasattr(self, "device_output_graph") and self.device_output_graph:
- self.device_output_graph.stop()
+ def _to_device(self, device: torch.device) -> None:
+ """Access each sub-layer within all `DynapcnnLayer` instances and call `.to(device)` on them."""
+ for layer in self.dynapcnn_layers.values():
+ if isinstance(layer, sinabs.backend.dynapcnn.dynapcnn_layer.DynapcnnLayer):
+ layer.to(device)
+
+ for _, data in self._merge_points.items():
+ data["merge"].to(device)
+
+ def __str__(self):
+ pretty_print = ""
+ if self.dvs_layer is not None:
+ pretty_print += (
+ "-------------------------- [ DVSLayer ] --------------------------\n"
+ )
+ pretty_print += f"{self.dvs_layer}\n\n"
+ for idx, layer_data in self.dynapcnn_layers.items():
+ pretty_print += f"----------------------- [ DynapcnnLayer {idx} ] -----------------------\n"
+ if self.is_deployed_on_dynapcnn_device:
+ pretty_print += f"Core {self.layer2core_map[idx]}\n"
+ pretty_print += f"{layer_data}\n\n"
+
+ return pretty_print
+
+ def __repr__(self):
+ if self.is_deployed_on_dynapcnn_device:
+ layer_info = "\n\n".join(
+ f"{idx} - core: {self.layer2core_map[idx]}\n{pformat(layer)}"
+ for idx, layer in self.dynapcnn_layers.items()
+ )
+ device_info = f" deployed on {self.device},"
+ else:
+ layer_info = "\n\n".join(
+ f"Index: {idx}\n{pformat(layer)}"
+ for idx, layer in self.dynapcnn_layers.items()
+ )
+ device_info = f" on {self.device}," if hasattr(self, "device") else ""
+ return (
+ f"DynapCNN Network{device_info} containing:\nDVS Layer: {pformat(self.dvs_layer)}"
+ "\n\nDynapCNN Layers:\n\n" + layer_info
+ )


  class DynapcnnCompatibleNetwork(DynapcnnNetwork):
  """Deprecated class, use DynapcnnNetwork instead."""

- def __init__(
- self,
- snn: Union[nn.Sequential, sinabs.Network],
- input_shape: Optional[Tuple[int, int, int]] = None,
- dvs_input: bool = False,
- discretize: bool = True,
- ):
+ def __init__(self, *args, **kwargs):
  from warnings import warn

  warn(
  "DynapcnnCompatibleNetwork has been renamed to DynapcnnNetwork "
  + "and will be removed in a future release."
  )
- super().__init__(snn, input_shape, dvs_input, discretize)
+ super().__init__(*args, **kwargs)
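
To make the reworked `DynapcnnNetwork` interface above concrete, here is a minimal usage sketch (not part of the diff). It assumes a toy sinabs-compatible model and an attached Speck devkit; the layer shapes, the `IAFSqueeze` layers and the chosen device string are illustrative only::

    import torch
    import torch.nn as nn
    import sinabs.layers as sl
    from sinabs.backend.dynapcnn import DynapcnnNetwork

    # Hypothetical toy SNN; any sinabs-compatible spiking model works here.
    snn = nn.Sequential(
        nn.Conv2d(1, 8, kernel_size=3, bias=False),
        sl.IAFSqueeze(batch_size=1),
        nn.AvgPool2d(2),
        nn.Conv2d(8, 16, kernel_size=3, bias=False),
        sl.IAFSqueeze(batch_size=1),
        nn.Flatten(),
        nn.Linear(16 * 11 * 11, 10, bias=False),
        sl.IAFSqueeze(batch_size=1),
    )

    hw_model = DynapcnnNetwork(
        snn=snn,
        input_shape=(1, 28, 28),  # (features, height, width)
        batch_size=1,
        dvs_input=False,          # no DVSLayer added, DVS sensor left unconfigured
        discretize=True,
    )

    # Software simulation; output structure depends on `return_complete`.
    out = hw_model(torch.zeros((1, 1, 28, 28)), return_complete=False)

    # Deployment (requires samna and a connected devkit). `layer2core_map="auto"`
    # lets the config builder pick a valid core assignment; `monitor_layers=[-1]`
    # monitors only the exit layers of the network.
    hw_model.to(
        "speck2fdevkit:0",
        layer2core_map="auto",
        monitor_layers=[-1],
    )

Code that still passes `chip_layers_ordering` to `to()` or `make_config()` keeps working, but it now emits a `DeprecationWarning` and is converted internally into a `layer2core_map` dict, as shown in the `_make_config` hunk above.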