sinabs 3.0.4.dev25__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. sinabs/activation/reset_mechanism.py +3 -3
  2. sinabs/activation/surrogate_gradient_fn.py +4 -4
  3. sinabs/backend/dynapcnn/__init__.py +5 -4
  4. sinabs/backend/dynapcnn/chip_factory.py +33 -61
  5. sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
  6. sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
  7. sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
  8. sinabs/backend/dynapcnn/config_builder.py +39 -59
  9. sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
  10. sinabs/backend/dynapcnn/discretize.py +91 -155
  11. sinabs/backend/dynapcnn/dvs_layer.py +59 -101
  12. sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
  13. sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
  14. sinabs/backend/dynapcnn/dynapcnn_network.py +602 -325
  15. sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
  16. sinabs/backend/dynapcnn/exceptions.py +122 -3
  17. sinabs/backend/dynapcnn/io.py +55 -92
  18. sinabs/backend/dynapcnn/mapping.py +111 -75
  19. sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
  20. sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
  21. sinabs/backend/dynapcnn/utils.py +214 -459
  22. sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
  23. sinabs/conversion.py +2 -2
  24. sinabs/from_torch.py +23 -1
  25. sinabs/hooks.py +38 -41
  26. sinabs/layers/alif.py +16 -16
  27. sinabs/layers/crop2d.py +2 -2
  28. sinabs/layers/exp_leak.py +1 -1
  29. sinabs/layers/iaf.py +11 -11
  30. sinabs/layers/lif.py +9 -9
  31. sinabs/layers/neuromorphic_relu.py +9 -8
  32. sinabs/layers/pool2d.py +5 -5
  33. sinabs/layers/quantize.py +1 -1
  34. sinabs/layers/stateful_layer.py +10 -7
  35. sinabs/layers/to_spike.py +9 -9
  36. sinabs/network.py +14 -12
  37. sinabs/nir.py +4 -3
  38. sinabs/synopcounter.py +10 -7
  39. sinabs/utils.py +155 -7
  40. sinabs/validate_memory_speck.py +0 -5
  41. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/METADATA +3 -2
  42. sinabs-3.1.1.dist-info/RECORD +65 -0
  43. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/AUTHORS +1 -0
  44. sinabs-3.1.1.dist-info/pbr.json +1 -0
  45. sinabs-3.0.4.dev25.dist-info/RECORD +0 -59
  46. sinabs-3.0.4.dev25.dist-info/pbr.json +0 -1
  47. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/WHEEL +0 -0
  48. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/licenses/LICENSE +0 -0
  49. {sinabs-3.0.4.dev25.dist-info → sinabs-3.1.1.dist-info}/top_level.txt +0 -0
sinabs/backend/dynapcnn/io.py
@@ -34,11 +34,9 @@ def enable_timestamps(
     """
     Enable timestamps of the samna node.
 
-    Args
-    ----
-    device_id: str
-        Name of the device to initialize. Required for different existing APIs
-        for Speck chips
+    Args:
+        device_id: Name of the device to initialize. Required for different
+            existing APIs for Speck chips
     """
     device_id = standardize_device_id(device_id=device_id)
     device_info = device_map[device_id]
@@ -52,12 +50,9 @@ def disable_timestamps(
     """
     Disable timestamps of the samna node.
 
-    Args
-    ----
-
-    device_id: str
-        Name of the device to initialize. Required for different existing APIs
-        for Speck chips
+    Args:
+        device_id: Name of the device to initialize. Required for different
+            existing APIs for Speck chips
     """
     device_id = standardize_device_id(device_id=device_id)
     device_info = device_map[device_id]
@@ -71,11 +66,9 @@ def reset_timestamps(
     """
     Reset timestamps of the samna node.
 
-    Args
-    ----
-    device_id: str
-        Name of the device to initialize. Required for different existing APIs
-        for Speck chips
+    Args:
+        device_id: Name of the device to initialize. Required for different
+            existing APIs for Speck chips
     """
     device_id = standardize_device_id(device_id=device_id)
     device_info = device_map[device_id]
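
The three timestamp helpers above share the same device-ID convention. A minimal usage sketch (the ID "speck2fdevkit:0" is only an example and a connected devkit is assumed):

from sinabs.backend.dynapcnn import io

# Reset and enable timestamping before streaming events, disable it afterwards.
io.enable_timestamps("speck2fdevkit:0")
io.reset_timestamps("speck2fdevkit:0")
# ... stream events from the devkit ...
io.disable_timestamps("speck2fdevkit:0")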
@@ -88,19 +81,11 @@ def events_to_xytp(event_list: List, layer: int) -> np.array:
     Convert an eventList read from `samna` to a numpy structured array of `x`, `y`, `t`,
     `channel`.
 
-    Parameters
-    ----------
-
-    event_list: List
-        A list comprising of events from samna API
-
-    layer: int
-        The index of layer for which the data needs to be converted
-
-    Returns
-    -------
+    Args:
+        event_list: A list comprising of events from samna API.
+        layer: The index of layer for which the data needs to be converted.
 
-    xytc: np.array
+    Returns:
         A numpy structured array with columns `x`, `y`, `t`, `channel`.
     """
     evs_filtered = list(
@@ -148,8 +133,11 @@ def get_device_map() -> Dict:
     # Group by device_type_name
     device_groups = groupby(devices, lambda x: x.device_type_name)
     # Switch keys from samna's device_type_name to device_type names
+    # -- guarantee it is a supported device
     device_groups = {
-        device_type_map[k]: sort_devices(list(v)) for k, v in device_groups
+        device_type_map[k]: sort_devices(list(v))
+        for k, v in device_groups
+        if k in device_type_map
     }
     # Flat map
     for dev_type, dev_list in device_groups.items():
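
The new `if k in device_type_map` guard makes `get_device_map` skip devices that samna reports but sinabs does not support, instead of raising a `KeyError`. A sketch of how the returned map is typically inspected (device IDs shown are examples):

from sinabs.backend.dynapcnn import io

device_map = io.get_device_map()
# Keys follow the device_name:index convention, e.g. "speck2fdevkit:0";
# unsupported device types no longer appear in the map.
print(list(device_map))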
@@ -161,16 +149,11 @@
 def is_device_type(dev_info: samna.device.DeviceInfo, dev_type: str) -> bool:
     """Check if a DeviceInfo object is of a given device type `dev_type`
 
-    Args
-    ----
-
-    dev_info: samna.device.DeviceInfo
-        Device info object
-    dev_type: str
-        Device type as a string
+    Args:
+        dev_info: samna.device.DeviceInfo. Device info object.
+        dev_type: Device type as a string.
 
     Returns:
-    --------
         bool
     """
     return dev_info.device_type_name == device_types[dev_type]
@@ -179,17 +162,13 @@ def is_device_type(dev_info: samna.device.DeviceInfo, dev_type: str) -> bool:
 def discover_device(device_id: str):
     """Discover a samna device by device_name:device_id pair.
 
-    Args
-    ----
-
-    device_id: str
-        Device name/identifier (speck2fdevkit:0 or speck2edevkit:0 or ... )
-        The convention is similar to that of pytorch GPU identifier ie cuda:0 , cuda:1 etc.
-
-    Returns
-    -------
+    Args:
+        device_id: Device name/identifier (speck2fdevkit:0 or speck2edevkit:0)
+            The convention is similar to that of pytorch GPU identifier i.e.,
+            cuda:0, cuda:1 etc.
 
-    device_info: samna.device.DeviceInfo
+    Returns:
+        samna.device.DeviceInfo
     """
     device_id = standardize_device_id(device_id=device_id)
     device_info = device_map[device_id]
@@ -199,21 +178,21 @@ def discover_device(device_id: str):
 def open_device(device_id: str):
     """Open device function.
 
-    Args
-    ----
-
-    device_id: str
-        device_name:device_id pair given as a string
-
-    Returns
-    -------
+    Args:
+        device_id: device_name:device_id pair given as a string
 
-    device_handle: samna.device.*
+    Returns:
         Device handle received from samna.
     """
     device_id = standardize_device_id(device_id=device_id)
     device_map = get_device_map()
-    device_info = device_map[device_id]
+    try:
+        device_info = device_map[device_id]
+    except KeyError:
+        msg = f"Device {device_id} has not been found. Make sure it is connected."
+        if device_map:
+            msg += " The following devices are available:\n" + "\n".join(device_map)
+        raise IOError(msg)
     device_handle = samna.device.open_device(device_info)
 
     if device_handle is not None:
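
With the `try`/`except` added above, looking up a disconnected device now raises an `IOError` that lists the devices that were found, rather than a bare `KeyError`. A minimal sketch:

from sinabs.backend.dynapcnn import io

try:
    handle = io.open_device("speck2fdevkit:0")  # example ID, cuda:0-style convention
except IOError as err:
    # The message names the missing device and, if any devices are
    # connected, enumerates the available device IDs.
    print(err)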
@@ -225,12 +204,9 @@ def open_device(device_id: str):
 def close_device(device_id: str):
     """Close a device by device identifier.
 
-    Args
-    ----
-
-    device_id: str
-        device_name:device_id pair given as a string.
-        speck2fdevkit:0 or speck2edevkit:0 or speck2fdevkit:1 or ...
+    Args:
+        device_id: device_name:device_id pair given as a string.
+            speck2fdevkit:0 or speck2edevkit:0 or speck2fdevkit:1 or ...
     """
     device_id = standardize_device_id(device_id=device_id)
     device_info = device_map[device_id]
@@ -277,22 +253,15 @@ def calculate_neuron_address(
     """Calculate the neuron address on the devkit. This function is designed for ReadNeuronValue
     event to help the user check the neuron value of the SNN on the devkit.
 
-    Args
-    ----
-
-    x: int
-        x coordinate of the neuron
-    y: int
-        y coordinate of the neuron
-    c: int
-        channel index of the neuron
-    feature_map_size: Tuple[int, int, int]
-        the size of the feature map [channel, height, width]
-
-    Returns
-    ----
+    Args:
+        x: x coordinate of the neuron
+        y: y coordinate of the neuron
+        c: channel index of the neuron
+        feature_map_size: Tuple[int, int, int] the size of the feature map
+            [channel, height, width]
 
-    neuron_address: int
+    Returns:
+        neuron_address: int
     """
     # calculate how many bits it takes based on the feature map size
     channel, height, width = feature_map_size
@@ -317,22 +286,16 @@ def calculate_neuron_address(
 def neuron_address_to_cxy(
     address: int, feature_map_size: Tuple[int, int, int]
 ) -> Tuple:
-    """Calculate the c, x, y, coordinate of a neuron when the address of the NeuronValue event is
-    given.
-
-    Args
-    ----
+    """Calculate the c, x, y, coordinate of a neuron when the address of the
+    NeuronValue event is given.
 
-    address: int
-        the neuron address of the NeuronValue event
-    feature_map_size: Tuple[int, int, int]
-        the size of the feature map [channel, height, width]
-
-    Returns
-    ----
+    Args:
+        address (int): the neuron address of the NeuronValue event
+        feature_map_size: Tuple[int, int, int] the size of the feature map
+            [channel, height, width]
 
-    neuron_cxy: Tuple[int, int, int]
-        the [channel, x, y] of the neuron
+    Returns:
+        neuron_cxy: Tuple[int, int, int] the [channel, x, y] of the neuron
     """
     # calculate how many bits it takes based on the feature map size
     channel, height, width = feature_map_size
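
`calculate_neuron_address` and `neuron_address_to_cxy` are inverses of each other. A round-trip sketch (keyword names and argument order are assumed from the docstrings above):

from sinabs.backend.dynapcnn.io import calculate_neuron_address, neuron_address_to_cxy

feature_map_size = (16, 32, 32)  # (channel, height, width)
address = calculate_neuron_address(x=3, y=5, c=2, feature_map_size=feature_map_size)
# neuron_address_to_cxy recovers (channel, x, y) from the packed address
assert neuron_address_to_cxy(address, feature_map_size=feature_map_size) == (2, 3, 5)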
sinabs/backend/dynapcnn/mapping.py
@@ -1,10 +1,13 @@
 from collections import deque
 from copy import deepcopy
 from dataclasses import dataclass
-from typing import List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple, Union
+
+import sinabs
 
 from .dvs_layer import DVSLayer
 from .dynapcnn_layer import DynapcnnLayer
+from .exceptions import InvalidModel
 
 
 @dataclass
@@ -27,16 +30,11 @@ def find_chip_layers(
 ) -> List[int]:
     """Find all layers where a given layer configuration fits.
 
-    Parameters
-    ----------
-    layer:
-        DynapcnnLayer
-
-    constraints:
-        A list of all the layer's constraints
+    Args:
+        layer: DynapCNNLayer.
+        constraints: A list of all the layer's constraints.
 
-    Returns
-    -------
+    Returns:
         A list of indices of layers where the given layer fits.
     """
     idx = [i for (i, constraint) in enumerate(constraints) if constraint.fits(layer)]
@@ -44,50 +42,71 @@ def find_chip_layers(
 
 
 def get_valid_mapping(
-    model: "DynapcnnNetwork", constraints: List[LayerConstraints]
-) -> List[Tuple[int, int]]:
+    layers: Dict[int, DynapcnnLayer], constraints: List[LayerConstraints]
+) -> Dict[int, int]:
     """Given a model, find a valid layer ordering for its placement within the constraints
     provided.
 
-    Parameters
-    ----------
-    model:
-        DynapcnnNetwork
-    constraints:
-        A list of all the layer's constraints
+    Args:
+        layers: Dict with layer indices as keys and DynapcnnLayer instances as values.
+        constraints: A list of all the layer's constraints.
 
-    Returns
-    -------
+    Returns:
+        Dict mapping from layer index (key) to assigned core ID (value).
     """
+    # Store layer indices and lists of possible target chips in separate lists
+    layer_indices = []
     layer_mapping = []
-
-    for layer in model.sequence:
-        if isinstance(layer, DynapcnnLayer):
-            layer_mapping.append(find_chip_layers(layer, constraints))
+    for layer_index, this_layer in layers.items():
+        # Skip DVSLayers
+        if isinstance(this_layer, DynapcnnLayer):
+            chip_layers = find_chip_layers(this_layer, constraints)
+            layer_mapping.append(chip_layers)
+            layer_indices.append(layer_index)
+        # Make sure only DynapCNNLayers and DVSLayers are passed
+        elif not isinstance(this_layer, DVSLayer):
+            raise ValueError(f"Found unexpected layer type: `{type(this_layer)}`")
 
     graph = make_flow_graph(layer_mapping, len(constraints))
 
-    # Call mapping
+    # Use Edmonds' Algorithm to find suitable cores for each DynapCNNLayer.
    new_graph = edmonds(graph, 0, len(graph) - 1)
+    netmap = recover_mapping(new_graph, len(layer_mapping))
 
-    netmap = recover_mapping(new_graph, layer_mapping)
-    return netmap
+    # Convert `netmap` to dict mapping from layer index to core ID
+    return {layer_idx: core_id for layer_idx, core_id in zip(layer_indices, netmap)}
 
 
 @dataclass
-class Edge:
+class FlowGraphEdge:
     s: int
     t: int
     cap: int
     flow: int = 0
-    rev: Optional["Edge"] = None
+    rev: Optional["FlowGraphEdge"] = None
 
     def __repr__(self):
-        return f"Edge from {self.s} to {self.t} with capacity {self.cap} and flow {self.flow}"
+        return f"FlowGraphEdge from {self.s} to {self.t} with capacity {self.cap} and flow {self.flow}"
+
+
+def edmonds(
+    graph: List[List[FlowGraphEdge]], source: int, sink: int, verbose: bool = False
+) -> List[List[FlowGraphEdge]]:
+    """Use Edmonds' Algorithm to compute flow of flow graph
+
+    Makes a copy of the graph. The original graph is not changed in place.
 
+    Args:
+        graph (List[List[FlowGraphEdge]]): Flow graph representation. Each list
+            entry corresponds to a node and consists of a list holding the
+            outgoing edges from this node.
+        source (int): Index of source node within graph.
+        sink (int): Index of sink node within graph.
+        verbose (bool): Print detailed flow information if `True`.
 
-# graph is list of list of edges. Each edge is
-def edmonds(graph, source, sink, verbose: bool = False):
+    Returns:
+        New flow graph with calculated flow. Type is List[List[FlowGraphEdge]].
+    """
     graph = deepcopy(graph)
     flow = 0
     while True:
@@ -95,8 +114,8 @@ def edmonds(graph, source, sink, verbose: bool = False):
         q.append(source)
         pred = [None for _ in range(len(graph))]
         while len(q) != 0:
-            cur = q.popleft()
-            for edge in graph[cur]:
+            cur = q.popleft()  # current node index
+            for edge in graph[cur]:  # edges to/from current node
                 if pred[edge.t] is None and edge.t != source and edge.cap > edge.flow:
                     pred[edge.t] = edge
                     q.append(edge.t)
@@ -122,31 +141,33 @@ def edmonds(graph, source, sink, verbose: bool = False):
 
 def make_flow_graph(
     layer_mapping: List[List[int]], num_layers: int = 9
-) -> List[List[Edge]]:
-    """Make a flow graph given all possible chip layers for each DynapcnnCompatibleLayer layer.
-    Note that the flows are not computed yet. The flow for the graph generated here needs to be
-    populated by calling the method `edmonds`
-
-    Parameters
-    ----------
-    layer_mapping:
-        List of a list of all layer indices. Eg. [[1,3], [4, 6, 1]] for a two layer model
-    num_layers:
-        Number of layers on the chip
-
-    Returns
-    -------
-    graph: List[List[Edge]]
+) -> List[List[FlowGraphEdge]]:
+    """
+    Make a bipartite flow graph (flow network) given all possible chip layers
+    for each DynapCNNLayer layer. The goal is to formulate the mapping from
+    DynapCNNLayer instance to chip layer as a bipartite matching problem. Note that the
+    flows are not computed yet. The flow for the graph generated here needs to
+    be populated by calling the method `edmonds`.
+
+    Args:
+        layer_mapping: List of a list of matching chip core indices for each DynapCNNLayer instance.
+            Eg. [[1,3], [4, 6, 1]] for a two layer model, where each integer is a core index.
+        num_layers (int): Number of layers on the chip.
+
+    Returns:
+        Flow graph representation. Each list entry corresponds to a node and consists
+        of a list holding the outgoing edges from this node.
+        The returned object is of type List[List[FlowGraphEdge]].
     """
     graph = []
     # add all our nodes
     # one source node
     graph.append([])
     # one node for every layer that will be mapped
-    for x in range(len(layer_mapping)):
+    for __ in range(len(layer_mapping)):
         graph.append([])
     # one node for every chip layer
-    for x in range(num_layers):
+    for __ in range(num_layers):
         graph.append([])
     # one sink node
     graph.append([])
@@ -154,42 +175,57 @@ def make_flow_graph(
     target_offset = len(layer_mapping) + 1
     # first from source to all layers
     for i in range(len(layer_mapping)):
-        graph[0].append(Edge(s=0, t=i + 1, cap=1, flow=0))
-        # add the reverse edge
-        graph[i + 1].append(Edge(s=i + 1, t=0, cap=0, flow=0))
+        source_to_layer = FlowGraphEdge(s=0, t=i + 1, cap=1, flow=0)
+        layer_to_source = FlowGraphEdge(s=i + 1, t=0, cap=0, flow=0)
         # fill in reverse pointers
-        graph[0][-1].rev = graph[i + 1][-1]
-        graph[i + 1][-1].rev = graph[0][-1]
+        source_to_layer.rev = layer_to_source
+        layer_to_source.rev = source_to_layer
+        # append new edges
+        graph[0].append(source_to_layer)
+        graph[i + 1].append(layer_to_source)
     # then from layers to chip layers
     for i, layer_targets in enumerate(layer_mapping):
         for target in layer_targets:
-            graph[i + 1].append(Edge(s=i + 1, t=target + target_offset, cap=1, flow=0))
-            graph[target + target_offset].append(
-                Edge(s=target + target_offset, t=i + 1, cap=0, flow=0)
+            layer_to_chip = FlowGraphEdge(
+                s=i + 1, t=target + target_offset, cap=1, flow=0
+            )
+            chip_to_layer = FlowGraphEdge(
+                s=target + target_offset, t=i + 1, cap=0, flow=0
             )
-            graph[i + 1][-1].rev = graph[target + target_offset][-1]
-            graph[target + target_offset][-1].rev = graph[i + 1][-1]
-    # print(graph)
+            layer_to_chip.rev = chip_to_layer
+            chip_to_layer.rev = layer_to_chip
+            graph[i + 1].append(layer_to_chip)
+            graph[target + target_offset].append(chip_to_layer)
     # then from chip layers to sink
-    for i, layer in enumerate(graph[target_offset:-1]):
-        sink = len(graph) - 1
-        source = i + target_offset
-        graph[source].append(Edge(s=source, t=sink, cap=1, flow=0))
-        graph[sink].append(Edge(s=sink, t=source, cap=0, flow=0))
-        graph[source][-1].rev = graph[sink][-1]
+    sink = len(graph) - 1
+    for chip_node in range(target_offset, sink):
+        graph[chip_node].append(FlowGraphEdge(s=chip_node, t=sink, cap=1, flow=0))
+        graph[sink].append(FlowGraphEdge(s=sink, t=chip_node, cap=0, flow=0))
+        graph[chip_node][-1].rev = graph[sink][-1]
     graph[sink][-1].rev = graph[sink][-1]
     return graph
 
 
-def recover_mapping(graph, layer_mapping) -> List[Tuple[int, int]]:
+def recover_mapping(graph: List[List[FlowGraphEdge]], num_layers: int) -> List[int]:
+    """Based on the flow graph retrieve a layer-to-core mapping
+
+    Args:
+        graph (List[List[FlowGraphEdge]]): Flow graph representation with flow
+            calculated. Each list entry corresponds to a node and consists of a
+            list holding the outgoing edges from this node.
+        num_layers (int): Number of software layers.
+
+    Returns:
+        Assigned core IDs for each layer in order. Type is List[int].
+    """
     mapping = []
-    for i, layer in enumerate(layer_mapping):
-        for edge in graph[i + 1]:
+    for i in range(1, num_layers + 1):  # `+1` to skip source node
+        for edge in graph[i]:
             if edge.flow == 1:
-                mapping.append((i, edge.t - len(layer_mapping) - 1))
-    if len(mapping) != len(layer_mapping):
+                mapping.append(edge.t - num_layers - 1)
+    if len(mapping) != num_layers:
         raise ValueError(
-            "No valid mapping found. "
-            "For Speck family you can use `utils.validate_memory_mapping_speck()` to get more information."
+            "No valid mapping found. One or more of the DynapcnnLayers could not be mapped to any core. "
+            "For Speck family you can verify if it is a memory issue by using `utils.validate_memory_mapping_speck()` to get more information."
         )
     return mapping
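
End to end, the refactored helpers above compose as follows. A sketch using the docstring's own example mapping (the exact core IDs in the output depend on the matching found):

from sinabs.backend.dynapcnn.mapping import edmonds, make_flow_graph, recover_mapping

# Two software layers: layer 0 fits chip cores 1 or 3; layer 1 fits cores 4, 6 or 1.
layer_mapping = [[1, 3], [4, 6, 1]]
graph = make_flow_graph(layer_mapping, num_layers=9)
flow_graph = edmonds(graph, 0, len(graph) - 1)  # node 0 is the source, the last node the sink
core_ids = recover_mapping(flow_graph, num_layers=len(layer_mapping))
print(core_ids)  # one core ID per software layer, e.g. [1, 4]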