sinabs 3.0.4.dev25__py3-none-any.whl → 3.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sinabs/activation/reset_mechanism.py +3 -3
- sinabs/activation/surrogate_gradient_fn.py +4 -4
- sinabs/backend/dynapcnn/__init__.py +5 -4
- sinabs/backend/dynapcnn/chip_factory.py +33 -61
- sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
- sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
- sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
- sinabs/backend/dynapcnn/config_builder.py +39 -59
- sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
- sinabs/backend/dynapcnn/discretize.py +91 -155
- sinabs/backend/dynapcnn/dvs_layer.py +59 -101
- sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
- sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
- sinabs/backend/dynapcnn/dynapcnn_network.py +602 -325
- sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
- sinabs/backend/dynapcnn/exceptions.py +122 -3
- sinabs/backend/dynapcnn/io.py +55 -92
- sinabs/backend/dynapcnn/mapping.py +111 -75
- sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
- sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
- sinabs/backend/dynapcnn/utils.py +214 -459
- sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
- sinabs/conversion.py +2 -2
- sinabs/from_torch.py +23 -1
- sinabs/hooks.py +38 -41
- sinabs/layers/alif.py +16 -16
- sinabs/layers/crop2d.py +2 -2
- sinabs/layers/exp_leak.py +1 -1
- sinabs/layers/iaf.py +11 -11
- sinabs/layers/lif.py +9 -9
- sinabs/layers/neuromorphic_relu.py +9 -8
- sinabs/layers/pool2d.py +5 -5
- sinabs/layers/quantize.py +1 -1
- sinabs/layers/stateful_layer.py +10 -7
- sinabs/layers/to_spike.py +9 -9
- sinabs/network.py +14 -12
- sinabs/nir.py +4 -3
- sinabs/synopcounter.py +10 -7
- sinabs/utils.py +155 -7
- sinabs/validate_memory_speck.py +0 -5
- {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/METADATA +3 -2
- sinabs-3.1.1.dist-info/RECORD +65 -0
- {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/AUTHORS +1 -0
- sinabs-3.1.1.dist-info/pbr.json +1 -0
- sinabs-3.0.4.dev25.dist-info/RECORD +0 -59
- sinabs-3.0.4.dev25.dist-info/pbr.json +0 -1
- {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/WHEEL +0 -0
- {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/LICENSE +0 -0
- {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/top_level.txt +0 -0
sinabs/backend/dynapcnn/dynapcnn_network.py

@@ -1,174 +1,378 @@
 import time
-from
-from typing import List, Optional, Sequence, Tuple, Union
+from pprint import pformat
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+from warnings import warn

 import samna
 import torch
 import torch.nn as nn
+from torch import Tensor

 import sinabs
+import sinabs.layers as sl

 from .chip_factory import ChipFactory
 from .dvs_layer import DVSLayer
 from .dynapcnn_layer import DynapcnnLayer
 from .io import disable_timestamps, enable_timestamps, open_device, reset_timestamps
+from .nir_graph_extractor import GraphExtractor
 from .utils import (
-
-
-    convert_model_to_layer_list,
+    COMPLETELY_IGNORED_LAYER_TYPES,
+    IGNORED_LAYER_TYPES,
     infer_input_shape,
     parse_device_id,
 )
+from .weight_rescaling_methods import rescale_method_1


 class DynapcnnNetwork(nn.Module):
-    """Given a sinabs spiking network, prepare a dynapcnn-compatible network. This can be used to
-    test the network will be equivalent once on DYNAPCNN. This class also provides utilities to
-    make the dynapcnn configuration and upload it to DYNAPCNN.
-
-    The following operations are done when converting to dynapcnn-compatible:
-
-    * multiple avg pooling layers in a row are consolidated into one and \
-    turned into sum pooling layers;
-    * checks are performed on layer hyperparameter compatibility with dynapcnn \
-    (kernel sizes, strides, padding)
-    * checks are performed on network structure compatibility with dynapcnn \
-    (certain layers can only be followed by other layers)
-    * linear layers are turned into convolutional layers
-    * dropout layers are ignored
-    * weights, biases and thresholds are discretized according to dynapcnn requirements
-
-    Note that the model parameters are only ever transferred to the device
-    on the `to` call, so changing a threshold or weight of a model that
-    is deployed will have no effect on the model on chip until `to` is called again.
-    """
-
     def __init__(
         self,
-        snn:
+        snn: nn.Module,
         input_shape: Optional[Tuple[int, int, int]] = None,
-
+        batch_size: Optional[int] = None,
+        dvs_input: Optional[bool] = None,
         discretize: bool = True,
+        weight_rescaling_fn: Callable = rescale_method_1,
     ):
-        """
-
-
-
-
-
-
-
-
-        If
-
-
-
-        If
-
-
+        """Given a sinabs spiking network, prepare a dynapcnn-compatible network. This can be used to
+        test the network will be equivalent once on DYNAPCNN. This class also provides utilities to
+        make the dynapcnn configuration and upload it to DYNAPCNN.
+
+        Attributes:
+            snn (nn.Module): a implementing a spiking network.
+            input_shape (tuple or None): a description of the input dimensions
+                as `(features, height, width)`. If `None`, `snn` must contain a
+                `DVSLayer` instance, from which the input shape will be inferred.
+            batch_size (optional int): If `None`, will try to infer the batch
+                size from the model. If int value is provided, it has to match
+                the actual batch size of the model.
+            dvs_input (bool): optional (default as `None`). Wether or not dynapcnn
+                receive input from its DVS camera.
+                If a `DVSLayer` is part of `snn`...
+                ... and `dvs_input` is `False`, its `disable_pixel_array` attribute
+                will be set `True`. This means the DVS sensor will be configured
+                upon deployment but its output will not be sent as input
+                ... and `dvs_input` is `None`, the `disable_pixel_array` attribute
+                of the layer will not be changed.
+                ... and `dvs_input` is `True`, `disable_pixel_array` will be set
+                `False`, so that the DVS sensor data is sent to the network.
+                If no `DVSLayer` is part of `snn`...
+                ... and `dvs_input` is `False` or `None`, no `DVSLayer` will be added
+                and the DVS sensor will not be configured upon deployment.
+                ... and `dvs_input` is `True`, a `DVSLayer` instance will be added
+                to the network, with `disable_pixel_array` set to `False`.
+            discretize (bool): If `True`, discretize the parameters and thresholds.
+                This is needed for uploading weights to dynapcnn. Set to `False`
+                only for testing purposes.
+            weight_rescaling_fn (callable): a method that handles how the re-scaling
+                factor for one or more `SumPool2d` projecting to the same convolutional
+                layer are combined/re-scaled before applying them.
         """
         super().__init__()

-
-
+        if isinstance(snn, sinabs.Network):
+            # Ignore `analog_model` of sinabs `Network` instances
+            snn = snn.spiking_model
+
+        self.dvs_input = dvs_input
+        self.input_shape = infer_input_shape(snn, input_shape)
+        self._layer2core_map = None
+
+        # Infer batch size for dummy input to graph extractor
+        if batch_size is None:
+            batch_size = sinabs.utils.get_smallest_compatible_time_dimension(snn)
+        # computational graph from original PyTorch module.
+        self._graph_extractor = GraphExtractor(
+            snn,
+            torch.randn((batch_size, *self.input_shape)),
+            self.dvs_input,
+            ignore_node_types=COMPLETELY_IGNORED_LAYER_TYPES,
+        )
+
+        # Remove nodes of ignored classes (including merge nodes)
+        # Other than `COMPLETELY_IGNORED_LAYER_TYPES`, `IGNORED_LAYER_TYPES` are
+        # part of the graph initially and are needed to ensure proper handling of
+        # graph structure (e.g. Merge nodes) or meta-information (e.g.
+        # `nn.Flatten` for io-shapes)
+        self._graph_extractor.remove_nodes_by_class(IGNORED_LAYER_TYPES)

-
-
-
+        # Module to execute forward pass through network
+        self._dynapcnn_module = self._graph_extractor.get_dynapcnn_network_module(
+            discretize=discretize, weight_rescaling_fn=weight_rescaling_fn
         )
-
-
-
-
-
+        self._dynapcnn_module.setup_dynapcnnlayer_graph(index_layers_topologically=True)
+
+    @property
+    def all_layers(self):
+        return self._dynapcnn_module.all_layers
+
+    @property
+    def dvs_node_info(self):
+        return self._dynapcnn_module.dvs_node_info

-
-
+    @property
+    def dvs_layer(self):
+        return self._dynapcnn_module.dvs_layer

-
-
-
-
-
-            dvs_input=self.dvs_input,
+    @property
+    def chip_layers_ordering(self):
+        warn(
+            "`chip_layers_ordering` is deprecated. Returning `layer2core_map` instead.",
+            DeprecationWarning,
         )
+        return self._layer2core_map

-
-
-
-
-
-
-
-
-
-
-
+    @property
+    def dynapcnn_layers(self):
+        return self._dynapcnn_module.dynapcnn_layers
+
+    @property
+    def dynapcnn_module(self):
+        return self._dynapcnn_module
+
+    @property
+    def exit_layers(self):
+        return [self.all_layers[i] for i in self._dynapcnn_module.get_exit_layers()]
+
+    @property
+    def exit_layer_ids(self):
+        return self._dynapcnn_module.get_exit_layers()
+
+    @property
+    def is_deployed_on_dynapcnn_device(self):
+        return (
+            hasattr(self, "device")
+            and parse_device_id(self.device)[0] in ChipFactory.supported_devices
+        )
+
+    @property
+    def layer_destination_map(self):
+        return self._dynapcnn_module.destination_map
+
+    @property
+    def layer2core_map(self):
+        return self._layer2core_map
+
+    @property
+    def name_2_indx_map(self):
+        return self._graph_extractor.name_2_indx_map
+
+    def hw_forward(self, x):
+        """Forwards data through the chip."""

-
-
+        # flush buffer.
+        _ = self.samna_output_buffer.get_events()

-
-
+        # Reset and enable timestamp
+        reset_timestamps(self.device)
+        enable_timestamps(self.device)

-
-
-
-        A list of layers on the device where you want each of the model's DynapcnnLayers to be placed.
-        The index of the core on chip to which the i-th layer in the model is mapped is the value of the i-th entry in the list.
-        Note: This list should be the same length as the number of dynapcnn layers in your model.
+        # send input.
+        self.samna_input_buffer.write(x)
+        received_evts = []

-
-
-
-        ::
+        # record at least until the last event has been replayed.
+        min_duration = max(event.timestamp for event in x) * 1e-6
+        time.sleep(min_duration)

-
-
-
+        # keep recording if more events are being registered.
+        while True:
+            prev_length = len(received_evts)
+            time.sleep(0.1)
+            received_evts.extend(self.samna_output_buffer.get_events())
+            if prev_length == len(received_evts):
+                break

-
-
-
+        # disable timestamp
+        disable_timestamps(self.device)
+
+        return received_evts
+
+    def forward(
+        self, x, return_complete: bool = False
+    ) -> Union[List["event"], Tensor, Dict[int, Dict[int, Tensor]]]:
+        """Forwards data through the `DynapcnnNetwork` instance.
+
+        If the network has been deployed on a Dynapcnn/Speck device the forward
+        pass happens on the devices. Otherwise the device will be simulated by
+        passing the data through the `DynapcnnLayer` instances.
+
+        Args:
+            x: Tensor that serves as input to network. Is passed to all layers
+                that are marked as entry points
+            return_complete: bool that indicates whether all layer outputs should
+                be return or only those with no further destinations (default)
+
+        Returns:
+            The returned object depends on whether the network has been deployed
+            on chip. If this is the case, a flat list of samna events is returned,
+            in the order in which the events have been collected.
+            If the data is passed through the `DynapcnnLayer` instances, the output
+            depends on `return_complete` and on the network configuration:
+            * If `return_complete` is `True`, all layer outputs will be returned in a
+              dict, with layer indices as keys, and nested dicts as values, which
+              hold destination indices as keys and output tensors as values.
+            * If `return_complete` is `False` and there is only a single destination
+              in the whole network that is marked as final (i.e. destination
+              index in dynapcnn layer handler is negative), it will return the
+              output as a single tensor.
+            * If `return_complete` is `False` and no destination in the network
+              is marked as final, a warning will be raised and the function
+              returns an empty dict.
+            * In all other cases a dict will be returned that is of the same
+              structure as if `return_complete` is `True`, but only with entries
+              where the destination is marked as final.
+        """
+        if self.is_deployed_on_dynapcnn_device:
+            return self.hw_forward(x)
+        else:
+            # Forward pass through software DynapcnnLayer instance
+            return self.dynapcnn_module(x, return_complete=return_complete)
+
+    def parameters(self) -> list:
+        """Gathers all the parameters of the network in a list. This is done by accessing the convolutional layer in each `DynapcnnLayer`,
+        calling its `.parameters` method and saving it to a list.
+
+        Note: the method assumes no biases are used.
+
+        Returns:
+            List of parameters of all convolutional layers in the `DynapcnnNetwok`.
+        """
+        parameters = []
+
+        for layer in self.dynapcnn_layers.values():
+            if isinstance(layer, DynapcnnLayer):
+                parameters.extend(layer.conv_layer.parameters())
+
+        return parameters
+
+    def memory_summary(self) -> Dict[str, Dict[int, int]]:
+        """Get a summary of the network's memory requirements.
+
+        Returns:
+            A dictionary with keys kernel, neuron, bias. The values are a dicts.
+            Each nested dict has as keys the indices of all dynapcnn_layers and
+            as values the corresonding memory values for each layer.
+        """
+        # For each entry (kernel, neuron, bias) provide one nested dict with
+        # one entry for each layer
+        summary = {key: dict() for key in ("kernel", "neuron", "bias")}
+
+        for layer_index, layer in self.dynapcnn_layers.items():
+            for key, val in layer.memory_summary().items():
+                summary[key][layer_index] = val
+
+        return summary

-
+    def init_weights(self, init_fn: nn.init = nn.init.xavier_normal_) -> None:
+        """Call the weight initialization method `init_fn` on each `DynapcnnLayer.conv_layer.weight.data` in the `DynapcnnNetwork` instance.
+
+        Args:
+            init_fn (torch.nn.init): the weight initialization method to be used.
+        """
+        for layer in self.dynapcnn_layers.values():
+            if isinstance(layer, DynapcnnLayer):
+                init_fn(layer.conv_layer.weight.data)
+
+    def detach_neuron_states(self) -> None:
+        """Detach the neuron states and activations from current computation graph (necessary)."""
+
+        for module in self.dynapcnn_layers.values():
+            if isinstance(module, DynapcnnLayer):
+                if isinstance(module.spk_layer, sl.StatefulLayer):
+                    for name, buffer in module.spk_layer.named_buffers():
+                        buffer.detach_()
+
+    def to(
+        self,
+        device: str = "cpu",
+        monitor_layers: Optional[Union[List, str]] = None,
+        config_modifier: Optional[Callable] = None,
+        slow_clk_frequency: Optional[int] = None,
+        layer2core_map: Union[Dict[int, int], str] = "auto",
+        chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+    ):
+        """Deploy model to cpu, gpu or a SynSense device.
+
+        Note that the model parameters are only ever transferred to the device on the `to` call,
+        so changing a threshold or weight of a model that is deployed will have no effect on the
+        model on chip until `to` is called again.
+
+        Args:
+            device (str): cpu:0, cuda:0, speck2edevkit
+            monitor_layers: None/List. A list of all layers in the module that
+                you want to monitor. Indexing starts with the first non-dvs
+                layer. If you want to monitor the dvs layer for eg.
+                ::
+
+                    monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+                    monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+                    monitor_layers = "all" # If you want to monitor all the layers
+                    monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+            config_modifier: A user configuration modifier method. This function
+                can be used to make any custom changes you want to make to the configuration object.
+            layer2core_map (dict or "auto"): Defines how cores on chip are
+                assigned to DynapcnnLayers. If `auto`, an automated procedure
+                will be used to find a valid ordering. Otherwise a dict needs
+                to be passed, with DynapcnnLayer indices as keys and assigned
+                core IDs as values. DynapcnnLayer indices have to match those of
+                `self.dynapcnn_layers`.
+            chip_layers_ordering: sequence of integers or `auto`. The order in
+                which the dynapcnn layers will be used. If `auto`,an automated
+                procedure will be used to find a valid ordering. A list of
+                layers on the device where you want each of the model's
+                DynapcnnLayers to be placed.
+                The index of the core on chip to which the i-th layer in the
+                model is mapped is the value of the i-th entry in the list.
+                Note: This list should be the same length as the number of
+                dynapcnn layers in your model.
+                Note: This parameter is obsolete and should not be passed
+                anymore. Use `layer2core_map` instead.
+
+        Note
         ----
         chip_layers_ordering and monitor_layers are used only when using synsense devices.
         For GPU or CPU usage these options are ignored.
         """
         self.device = device
+
         if isinstance(device, torch.device):
-
+            self._to_device(device)
+
         elif isinstance(device, str):
             device_name, _ = parse_device_id(device)
-
-
+
+            if device_name in ChipFactory.supported_devices:
+                # generate config.
                 config = self.make_config(
+                    layer2core_map=layer2core_map,
                     chip_layers_ordering=chip_layers_ordering,
                     device=device,
                     monitor_layers=monitor_layers,
                     config_modifier=config_modifier,
                 )

-                #
+                # apply configuration to device
                 self.samna_device = open_device(device)
                 self.samna_device.get_model().apply_configuration(config)
                 time.sleep(1)

-                #
+                # set external slow-clock if needed
                 if slow_clk_frequency is not None:
                     dk_io = self.samna_device.get_io_module()
                     dk_io.set_slow_clk(True)
                     dk_io.set_slow_clk_rate(slow_clk_frequency)  # Hz

                 builder = ChipFactory(device).get_config_builder()
-
+
+                # create input source node
                 self.samna_input_buffer = builder.get_input_buffer()
-
+
+                # create output sink node node.
                 self.samna_output_buffer = builder.get_output_buffer()

-                #
+                # connect source node to device sink.
                 self.device_input_graph = samna.graph.EventFilterGraph()
                 self.device_input_graph.sequential(
                     [
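
The hunk above replaces the old flat, sequential build with a graph-extraction pipeline and adds an explicit `batch_size`, `dvs_input`, and `weight_rescaling_fn` to the constructor. As a minimal construction sketch under the new API (the ANN architecture, shapes, and batch size below are illustrative assumptions, not taken from the diff):

```python
import torch
import torch.nn as nn

from sinabs.from_torch import from_model
from sinabs.backend.dynapcnn import DynapcnnNetwork

# Hypothetical model; any sinabs-convertible layer stack works similarly.
ann = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=3, bias=False),
    nn.ReLU(),
    nn.AvgPool2d(2),
    nn.Flatten(),
    nn.Linear(8 * 13 * 13, 10, bias=False),
)
snn = from_model(ann, input_shape=(1, 28, 28), batch_size=1).spiking_model

# 3.1 takes explicit `batch_size` and `dvs_input` arguments; weights can
# additionally be re-scaled via `weight_rescaling_fn` (default rescale_method_1).
dynapcnn_net = DynapcnnNetwork(
    snn=snn,
    input_shape=(1, 28, 28),
    batch_size=1,
    dvs_input=False,
)

# Software simulation: `forward` routes through the DynapcnnLayer instances.
out = dynapcnn_net(torch.randn(1, 1, 28, 28))
all_out = dynapcnn_net(torch.randn(1, 1, 28, 28), return_complete=True)
# `all_out` maps layer index -> {destination index -> output tensor}
```
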
@@ -177,7 +381,7 @@ class DynapcnnNetwork(nn.Module):
                     ]
                 )

-                #
+                # connect sink node to device.
                 self.device_output_graph = samna.graph.EventFilterGraph()
                 self.device_output_graph.sequential(
                     [
@@ -185,108 +389,18 @@ class DynapcnnNetwork(nn.Module):
                         self.samna_output_buffer,
                     ]
                 )
+
                 self.device_input_graph.start()
                 self.device_output_graph.start()
                 self.samna_config = config
-                return self
-            else:
-                return super().to(device)
-        else:
-            raise Exception("Unknown device description.")
-
-    def _make_config(
-        self,
-        chip_layers_ordering: Union[Sequence[int], str] = "auto",
-        device="speck2edevkit:0",
-        monitor_layers: Optional[Union[List, str]] = None,
-        config_modifier=None,
-    ) -> Tuple["SamnaConfiguration", bool]:
-        """Prepare and output the `samna` configuration for this network.
-
-        Parameters
-        ----------
-
-        chip_layers_ordering: sequence of integers or `auto`
-            The order in which the dynapcnn layers will be used. If `auto`,
-            an automated procedure will be used to find a valid ordering.
-            A list of layers on the device where you want each of the model's DynapcnnLayers to be placed.
-            The index of the core on chip to which the i-th layer in the model is mapped is the value of the i-th entry in the list.
-            Note: This list should be the same length as the number of dynapcnn layers in your model.
-
-        device: String
-            speck2edevkit or speck2fdevkit
-
-        monitor_layers: None/List/Str
-            A list of all layers in the module that you want to monitor. Indexing starts with the first non-dvs layer.
-            If you want to monitor the dvs layer for eg.
-            ::

-
-                monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
-                monitor_layers = "all" # If you want to monitor all the layers
-
-            If this value is left as None, by default the last layer of the model is monitored.
-
-        config_modifier:
-            A user configuration modifier method.
-            This function can be used to make any custom changes you want to make to the configuration object.
-
-        Returns
-        -------
-        Configuration object
-            Object defining the configuration for the device
-        Bool
-            True if the configuration is valid for the given device.
-
-        Raises
-        ------
-        ImportError
-            If samna is not available.
-        """
-        config_builder = ChipFactory(device).get_config_builder()
+                return self

-
+            else:
+                self._to_device(device)

-        # Figure out layer ordering
-        if chip_layers_ordering == "auto":
-            chip_layers_ordering = config_builder.get_valid_mapping(self)
         else:
-
-            if has_dvs_layer:
-                chip_layers_ordering = chip_layers_ordering[: len(self.sequence) - 1]
-            chip_layers_ordering = chip_layers_ordering[: len(self.sequence)]
-
-        # Save the chip layers
-        self.chip_layers_ordering = chip_layers_ordering
-        # Update config
-        config = config_builder.build_config(self, chip_layers_ordering)
-        if self.input_shape and self.input_shape[0] == 1:
-            config.dvs_layer.merge = True
-        # Check if any monitoring is enabled and if not, enable monitoring for the last layer
-        if monitor_layers is None:
-            monitor_layers = [-1]
-        elif monitor_layers == "all":
-            num_cnn_layers = len(self.sequence) - int(has_dvs_layer)
-            monitor_layers = list(range(num_cnn_layers))
-
-        # Enable monitors on the specified layers
-        # Find layers corresponding to the chip
-        monitor_chip_layers = [
-            self.find_chip_layer(lyr) for lyr in monitor_layers if lyr != "dvs"
-        ]
-        if "dvs" in monitor_layers:
-            monitor_chip_layers.append("dvs")
-        config_builder.monitor_layers(config, monitor_chip_layers)
-
-        # Fix default factory setting to not return input events (UGLY!! Ideally this should happen in samna)
-        # config.factory_settings.monitor_input_enable = False
-
-        # Apply user config modifier
-        if config_modifier is not None:
-            config = config_modifier(config)
-
-        # Validate config
-        return config, config_builder.validate_configuration(config)
+            raise Exception("Unknown device description.")

     def make_config(
         self,
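
The restructured `to` above separates chip deployment from the plain torch-device path (now handled by `_to_device`). A rough sketch of the chip path follows; the device string and settings are illustrative and require `samna` plus an attached SynSense devkit:

```python
# Hedged sketch: requires samna and a connected devkit.
dynapcnn_net.to(
    device="speck2fdevkit:0",
    monitor_layers=[-1],       # only monitor the network's exit layers
    slow_clk_frequency=10,     # optional external slow clock, in Hz
)

# Once deployed, calls dispatch to `hw_forward`, which writes input events
# to `samna_input_buffer` and drains chip output from `samna_output_buffer`:
# output_events = dynapcnn_net(input_events)
```
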
@@ -297,44 +411,34 @@ class DynapcnnNetwork(nn.Module):
     ):
         """Prepare and output the `samna` DYNAPCNN configuration for this network.

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            If this value is left as None, by default the last layer of the model is monitored.
-
-        config_modifier:
-            A user configuration modifier method.
-            This function can be used to make any custom changes you want to make to the configuration object.
+        Args:
+            chip_layers_ordering: sequence of integers or `auto`. The order in
+                which the dynapcnn layers will be used. If `auto`, an automated
+                procedure will be used to find a valid ordering. A list of
+                layers on the device where you want each of the model's
+                DynapcnnLayers to be placed. Note: This list should be the same
+                length as the number of dynapcnn layers in your model.
+            device (str): speck2edevkit or speck2fdevkit
+            monitor_layers: A list of all layers in the module that you want to
+                monitor. Indexing starts with the first non-dvs layer. If you
+                want to monitor the dvs layer for eg.
+                ::
+
+                    monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+                    monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+                    monitor_layers = "all" # If you want to monitor all the layers
+
+                If this value is left as None, by default the last layer of the model is monitored.
+            config_modifier: A user configuration modifier method. This
+                function can be used to make any custom changes you want to
+                make to the configuration object.

-        Returns
-
-        Configuration object
-            Object defining the configuration for the device
+        Returns:
+            Object defining the configuration for the device.

-        Raises
-
-
-            If samna is not available.
-        ValueError
-            If the generated configuration is not valid for the specified device.
+        Raises:
+            ImportError: If samna is not available.
+            ValueError: If the generated configuration is not valid for the specified device.
         """
         config, is_compatible = self._make_config(
             chip_layers_ordering=chip_layers_ordering,
@@ -367,10 +471,104 @@ class DynapcnnNetwork(nn.Module):
             raise e
         return is_compatible

+    def make_config(
+        self,
+        layer2core_map: Union[Dict[int, int], str] = "auto",
+        device: str = "speck2fdevkit:0",
+        monitor_layers: Optional[Union[List, str]] = None,
+        config_modifier: Optional[Callable] = None,
+        chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+    ):
+        """Prepare and output the `samna` DYNAPCNN configuration for this network.
+
+        Args:
+            layer2core_map (dict or "auto"): Defines how cores on chip are
+                assigned to DynapcnnLayers. If `auto`, an automated procedure
+                will be used to find a valid ordering. Otherwise a dict needs
+                to be passed, with DynapcnnLayer indices as keys and assigned
+                core IDs as values. DynapcnnLayer indices have to match those of
+                `self.dynapcnn_layers`.
+            device: (string): speck2devkit
+            monitor_layers: A list of all layers in the module that you want to
+                monitor. Indexing starts with the first non-dvs layer. If you
+                want to monitor the dvs layer for eg.
+                ::
+
+                    monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+                    monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+                    monitor_layers = "all" # If you want to monitor all the layers
+                    monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+
+                If this value is left as None, by default the last layer of the
+                model is monitored.
+            config_modifier (Callable or None): A user configuration modifier
+                method. This function can be used to make any custom changes
+                you want to make to the configuration object.
+            chip_layers_ordering (None, sequence of integers or "auto", obsolete):
+                The order in which the dynapcnn layers will be used. If `auto`,
+                an automated procedure will be used to find a valid ordering.
+                A list of layers on the device where you want each of the model's
+                DynapcnnLayers to be placed. Note: This list should be the same
+                length as the number of dynapcnn layers in your model. Note:
+                This parameter is obsolete and should not be passed anymore.
+                Use `layer2core_map` instead.
+
+        Returns:
+            Object defining the configuration for the device
+
+        Raises:
+            ImportError: If samna is not available.
+            ValueError: If the generated configuration is not valid for the specified device.
+        """
+        config, is_compatible = self._make_config(
+            layer2core_map=layer2core_map,
+            device=device,
+            monitor_layers=monitor_layers,
+            config_modifier=config_modifier,
+            chip_layers_ordering=chip_layers_ordering,
+        )
+
+        # Validate config
+        if is_compatible:
+            print("Network is valid")
+            return config
+        else:
+            raise ValueError(
+                f"Generated config is not valid for {device}. "
+                "Probably one or more layers are too large. Try "
+                "Reducing the number of neurons or the kernel sizes."
+            )
+
+    def has_dvs_layer(self) -> bool:
+        """Return True if there is a DVSLayer in the network
+
+        Returns:
+            True if DVSLayer is found within the network.
+        """
+        return self.dvs_layer is not None
+
+    def zero_grad(self, set_to_none: bool = False) -> None:
+        """Call `zero_grad` method of each DynapCNN layer
+
+        Args:
+            set_to_none (bool): This argument is passed directly to the
+                `zero_grad` method of each DynapCNN layer
+        """
+        for lyr in self.dynapcnn_layers.values():
+            lyr.zero_grad(set_to_none)
+
     def reset_states(self, randomize=False):
-        """Reset the states of the network.
+        """Reset the states of the network.
+        Note that setting `randomize` to `True` is only supported for models
+        that have not yet been deployed on a SynSense device.
+
+        Args:
+            randomize (bool): If `False` (default), will set all states to 0.
+                Otherwise will set to random values.
+        """
         if hasattr(self, "device") and isinstance(self.device, str):  # pragma: no cover
             device_name, _ = parse_device_id(self.device)
+            # Reset states on SynSense device
             if device_name in ChipFactory.supported_devices:
                 config_builder = ChipFactory(self.device).get_config_builder()
                 # Set all the vmem states in the samna config to zero
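
The new `make_config` signature above replaces the positional core list with an explicit mapping. A sketch of both styles against the documented API (the core IDs below are illustrative):

```python
# Automatic placement: let the config builder assign chip cores.
config = dynapcnn_net.make_config(
    layer2core_map="auto",
    device="speck2fdevkit:0",
)

# Explicit placement: DynapcnnLayer index -> chip core ID. The keys must
# exactly match `dynapcnn_net.dynapcnn_layers.keys()`, or a ValueError
# is raised inside `_make_config`.
config = dynapcnn_net.make_config(
    layer2core_map={0: 1, 1: 2, 2: 0},
    device="speck2fdevkit:0",
)
```
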
@@ -390,118 +588,197 @@ class DynapcnnNetwork(nn.Module):
                 time.sleep(0.1)
                 self.samna_input_graph.start()
                 return
+
+        # Reset states of `DynapcnnLayer` instances
         for layer in self.sequence:
             if isinstance(layer, DynapcnnLayer):
                 layer.spk_layer.reset_states(randomize=randomize)

-    def
-
-
+    def _make_config(
+        self,
+        layer2core_map: Union[Dict[int, int], str] = "auto",
+        device: str = "speck2fdevkit:0",
+        monitor_layers: Optional[Union[List, str]] = None,
+        config_modifier: Optional[Callable] = None,
+        chip_layers_ordering: Optional[Union[Sequence[int], str]] = None,
+    ) -> Tuple["SamnaConfiguration", bool]:
+        """Prepare and output the `samna` DYNAPCNN configuration for this network.

-
-
-
+        Args:
+            layer2core_map (dict or "auto"): Defines how cores on chip are
+                assigned to DynapcnnLayers. If `auto`, an automated procedure
+                will be used to find a valid ordering. Otherwise a dict needs
+                to be passed, with DynapcnnLayer indices as keys and assigned
+                core IDs as values. DynapcnnLayer indices have to match those
+                of `self.dynapcnn_layers`.
+            device: (string): dynapcnndevkit, speck2b or speck2devkit
+            monitor_layers: A list of all layers in the module that you want
+                to monitor. Indexing starts with the first non-dvs layer.
+                If you want to monitor the dvs layer for eg.
+                ::
+
+                    monitor_layers = ["dvs"] # If you want to monitor the output of the pre-processing layer
+                    monitor_layers = ["dvs", 8] # If you want to monitor preprocessing and layer 8
+                    monitor_layers = "all" # If you want to monitor all the layers
+                    monitor_layers = [-1] # If you want to only monitor exit points of the network (i.e. final layers)
+
+                If this value is left as None, by default the last layer of the model is monitored.
+
+            config_modifier (Callable or None): A user configuration modifier
+                method. This function can be used to make any custom changes
+                you want to make to the configuration object.
+            chip_layers_ordering (None, sequence of integers or "auto", obsolete):
+                The order in which the dynapcnn layers will be used. If `auto`,
+                an automated procedure will be used to find a valid ordering.
+                A list of layers on the device where you want each of the
+                model's DynapcnnLayers to be placed. Note: This list should be
+                the same length as the number of dynapcnn layers in your model.
+                Note: This parameter is obsolete and should not be passed
+                anymore. Use `layer2core_map` instead.

-
-
-
-            Index of a layer
+        Returns:
+            An object defining the configuration for the device and a boolean
+            that determines if the configuration is valid for the given device.

-
-
-
-
+        Raises:
+            ImportError: If samna is not available.
+            ValueError: If no valid mapping between the layers of this object
+                and the cores ofthe provided device can be found.
         """
-
-        num_cores_required = len(self.sequence)
-        if isinstance(self.sequence[0], DVSLayer):
-            num_cores_required -= 1
-        if len(self.chip_layers_ordering) != num_cores_required:
-            raise Exception(
-                f"Number of layers specified in chip_layers_ordering {self.chip_layers_ordering} does not correspond to the number of cores required for this model {num_cores_required}"
-            )
-
-        return self.chip_layers_ordering[layer_idx]
+        config_builder = ChipFactory(device).get_config_builder()

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if chip_layers_ordering is not None:
+            if layer2core_map != "auto":
+                warn(
+                    "Both `chip_layers_ordering` and `layer2core_map are provided. "
+                    "The parameter `chip_layers_ordering` is deprecated and will "
+                    "be ignored.",
+                    DeprecationWarning,
+                )
+            elif chip_layers_ordering == "auto":
+                warn(
+                    "The parameter `chip_layers_ordering` is deprecated. Passing "
+                    "'auto' is still accepted, but in the future please use "
+                    "`layer2core_map` instead.",
+                    DeprecationWarning,
+                )
+            else:
+                layer2core_map = {
+                    idx: core
+                    for idx, core in zip(self.dynapcnn_layers, chip_layers_ordering)
+                }
+                warn(
+                    "The parameter `chip_layers_ordering` is deprecated. "
+                    "Because `layer2core_map` is 'auto', and `chip_layers_ordering` "
+                    "is not, will convert `chip_layers_ordering` to a "
+                    "dict matching `layer2core_map`. In the future please use "
+                    "`layer2core_map` instead. Please make sure the inferred "
+                    "mapping from DynapcnnLayer index to core index is correct: "
+                    + pformat(layer2core_map),
+                    DeprecationWarning,
+                )
+        if layer2core_map == "auto":
+            # Assign chip core ID for each DynapcnnLayer.
+            layer2core_map = config_builder.map_layers_to_cores(self.dynapcnn_layers)
         else:
-
-
+            if not layer2core_map.keys() == self.dynapcnn_layers.keys():
+                raise ValueError(
+                    "The keys provided in `layer2core_map` must exactly match "
+                    "the keys in `self.dynapcnn_layers`"
+                )

-
-        """Get a summary of the network's memory requirements.
+        self._layer2core_map = layer2core_map

-
-
-
-
-
-
-        summary = {}
-
-        dynapcnn_layers = [
-            lyr for lyr in self.sequence if isinstance(lyr, DynapcnnLayer)
-        ]
-        summary.update({k: list() for k in dynapcnn_layers[0].memory_summary().keys()})
-        for lyr in dynapcnn_layers:
-            lyr_summary = lyr.memory_summary()
-            for k, v in lyr_summary.items():
-                summary[k].append(v)
-        return summary
+        # update config (config. DynapcnnLayer instances into their assigned core).
+        config = config_builder.build_config(
+            layers=self.all_layers,
+            layer2core_map=layer2core_map,
+            destination_map=self.layer_destination_map,
+        )

-
-
-
+        if monitor_layers is None:
+            # Monitor all layers with exit point destinations
+            monitor_layers = self._dynapcnn_module.get_exit_layers()
+        elif monitor_layers == "all":
+            monitor_layers = [
+                lyr_idx
+                for lyr_idx, layer in self.dynapcnn_layers.items()
+                if not isinstance(layer, DVSLayer)
+            ]
+        elif -1 in monitor_layers:
+            # Replace `-1` with exit layer IDs
+            monitor_layers.remove(-1)
+            monitor_layers += self._dynapcnn_module.get_exit_layers()
+
+        # Collect cores (chip layers) that are to be monitored
+        monitor_chip_layers = []
+        for lyr_idx in monitor_layers:
+            if str(lyr_idx).lower() == "dvs":
+                monitor_chip_layers.append("dvs")
+            else:
+                monitor_chip_layers.append(layer2core_map[lyr_idx])

-
-
-
-
+        # enable monitors on the specified layers
+        config_builder.monitor_layers(config, monitor_chip_layers)
+
+        if config_modifier is not None:
+            # apply user config modifier.
+            config = config_modifier(config)
+
+        # Validate config
+        return config, config_builder.validate_configuration(config)

-
-
-
+    def _to_device(self, device: torch.device) -> None:
+        """Access each sub-layer within all `DynapcnnLayer` instances and call `.to(device)` on them."""
+        for layer in self.dynapcnn_layers.values():
+            if isinstance(layer, sinabs.backend.dynapcnn.dynapcnn_layer.DynapcnnLayer):
+                layer.to(device)
+
+        for _, data in self._merge_points.items():
+            data["merge"].to(device)
+
+    def __str__(self):
+        pretty_print = ""
+        if self.dvs_layer is not None:
+            pretty_print += (
+                "-------------------------- [ DVSLayer ] --------------------------\n"
+            )
+            pretty_print += f"{self.dvs_layer}\n\n"
+        for idx, layer_data in self.dynapcnn_layers.items():
+            pretty_print += f"----------------------- [ DynapcnnLayer {idx} ] -----------------------\n"
+            if self.is_deployed_on_dynapcnn_device:
+                pretty_print += f"Core {self.layer2core_map[idx]}\n"
+            pretty_print += f"{layer_data}\n\n"
+
+        return pretty_print
+
+    def __repr__(self):
+        if self.is_deployed_on_dynapcnn_device:
+            layer_info = "\n\n".join(
+                f"{idx} - core: {self.layer2core_map[idx]}\n{pformat(layer)}"
+                for idx, layer in self.dynapcnn_layers.items()
+            )
+            device_info = f" deployed on {self.device},"
+        else:
+            layer_info = "\n\n".join(
+                f"Index: {idx}\n{pformat(layer)}"
+                for idx, layer in self.dynapcnn_layers.items()
+            )
+            device_info = f" on {self.device}," if hasattr(self, "device") else ""
+        return (
+            f"DynapCNN Network{device_info} containing:\nDVS Layer: {pformat(self.dvs_layer)}"
+            "\n\nDynapCNN Layers:\n\n" + layer_info
+        )


 class DynapcnnCompatibleNetwork(DynapcnnNetwork):
     """Deprecated class, use DynapcnnNetwork instead."""

-    def __init__(
-        self,
-        snn: Union[nn.Sequential, sinabs.Network],
-        input_shape: Optional[Tuple[int, int, int]] = None,
-        dvs_input: bool = False,
-        discretize: bool = True,
-    ):
+    def __init__(self, *args, **kwargs):
         from warnings import warn

         warn(
             "DynapcnnCompatibleNetwork has been renamed to DynapcnnNetwork "
             + "and will be removed in a future release."
         )
-        super().__init__(
+        super().__init__(*args, **kwargs)