sinabs 3.1.1.dev1__py3-none-any.whl → 3.1.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
sinabs/backend/dynapcnn/chips/dynapcnn.py CHANGED
@@ -101,7 +101,7 @@ class DynapcnnConfigBuilder(ConfigBuilder):
  dimensions["input_shape"]["feature_count"] = channel_count

  # dimensions["output_feature_count"] already done in conv2d_to_dict
- (f, h, w) = layer.get_neuron_shape()
+ f, h, w = layer.get_neuron_shape()
  dimensions["output_shape"]["size"] = {}
  dimensions["output_shape"]["feature_count"] = f
  dimensions["output_shape"]["size"]["x"] = w
@@ -121,7 +121,7 @@ class DynapcnnConfigBuilder(ConfigBuilder):
  config_dict["dimensions"] = dimensions
  # Update parameters from convolution
  if layer.conv_layer.bias is not None:
- (weights, biases) = layer.conv_layer.parameters()
+ weights, biases = layer.conv_layer.parameters()
  else:
  (weights,) = layer.conv_layer.parameters()
  biases = torch.zeros(layer.conv_layer.out_channels)
sinabs/backend/dynapcnn/dynapcnn_visualizer.py CHANGED
@@ -418,7 +418,9 @@ class DynapcnnVisualizer:
  + " should contain value `dvs`. "
  )

- last_layer = dynapcnn_network.chip_layers_ordering[-1]
+ last_layer = dynapcnn_network.layer2core_map[
+ dynapcnn_network.exit_layer_ids[-1]
+ ]

  if not config.cnn_layers[last_layer].monitor_enable:
  raise ValueError(
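This hunk (and the matching one at line 570 below) replaces the old `chip_layers_ordering[-1]` lookup with a two-step lookup: take the last entry of `exit_layer_ids` and map it to its chip core via `layer2core_map`, so the monitored core is resolved from the network's actual exit layer rather than from the position of the last entry in the layer ordering. A minimal sketch of the pattern, assuming only that the network object exposes the two attributes named in the diff (the helper itself is hypothetical):

```python
# Hypothetical helper illustrating the lookup pattern introduced above.
# `exit_layer_ids` and `layer2core_map` are the attributes referenced in the
# diff; the wrapper function and its name are assumptions for illustration.

def monitored_core(dynapcnn_network) -> int:
    """Return the chip core that hosts the network's (last) exit layer."""
    exit_layer = dynapcnn_network.exit_layer_ids[-1]    # id of the final exit layer
    return dynapcnn_network.layer2core_map[exit_layer]  # core that layer was placed on
```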
@@ -453,7 +455,7 @@ class DynapcnnVisualizer:

  # Streamer graph
  # Dvs node
- (_, dvs_member_filter, _, streamer_node) = self.streamer_graph.sequential(
+ _, dvs_member_filter, _, streamer_node = self.streamer_graph.sequential(
  [
  dynapcnn_network.samna_device.get_model_source_node(),
  samna.graph.JitMemberSelect(),
@@ -524,7 +526,7 @@ class DynapcnnVisualizer:
  ## Readout node
  if "r" in self.gui_type:
  if self.readout_node == "JitMajorityReadout":
- (_, majority_readout_node, _) = self.streamer_graph.sequential(
+ _, majority_readout_node, _ = self.streamer_graph.sequential(
  [
  spike_collection_node,
  samna.graph.JitMajorityReadout(samna.ui.Event),
@@ -542,7 +544,7 @@ class DynapcnnVisualizer:
  self.readout_default_threshold_high
  )
  else:
- (_, majority_readout_node, _) = self.streamer_graph.sequential(
+ _, majority_readout_node, _ = self.streamer_graph.sequential(
  [
  spike_collection_node,
  self.readout_node,
@@ -568,7 +570,9 @@ class DynapcnnVisualizer:
  dynapcnn_network (DynapcnnNetwork): DynapcnnNetwork object

  """
- last_layer = dynapcnn_network.chip_layers_ordering[-1]
+ last_layer = dynapcnn_network.layer2core_map[
+ dynapcnn_network.exit_layer_ids[-1]
+ ]
  config = dynapcnn_network.samna_config
  model_output_feature_count = config.cnn_layers[
  last_layer
sinabs/backend/dynapcnn/io.py CHANGED
@@ -133,8 +133,11 @@ def get_device_map() -> Dict:
  # Group by device_type_name
  device_groups = groupby(devices, lambda x: x.device_type_name)
  # Switch keys from samna's device_type_name to device_type names
+ # -- guarantee is a supported device
  device_groups = {
- device_type_map[k]: sort_devices(list(v)) for k, v in device_groups
+ device_type_map[k]: sort_devices(list(v))
+ for k, v in device_groups
+ if k in device_type_map
  }
  # Flat map
  for dev_type, dev_list in device_groups.items():
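The dictionary comprehension above now carries an `if k in device_type_map` guard, so device groups whose samna `device_type_name` has no entry in the map are skipped instead of triggering a `KeyError` on `device_type_map[k]`. A minimal, self-contained sketch of the same pattern; the map contents and device names below are made up for illustration and are not the library's real values:

```python
from itertools import groupby

# Illustrative stand-ins; the real map lives in sinabs.backend.dynapcnn.io.
device_type_map = {"DynapcnnDevKit": "dynapcnndevkit", "Speck2fDevKit": "speck2fdevkit"}
detected = ["DynapcnnDevKit", "Speck2fDevKit", "UnknownBoard"]

groups = groupby(sorted(detected), key=lambda name: name)
device_groups = {
    device_type_map[k]: list(v)
    for k, v in groups
    if k in device_type_map  # new guard: unsupported device types are dropped, not fatal
}
print(device_groups)
# {'dynapcnndevkit': ['DynapcnnDevKit'], 'speck2fdevkit': ['Speck2fDevKit']}
```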
sinabs/backend/dynapcnn/nir_graph_extractor.py CHANGED
@@ -504,7 +504,7 @@ class GraphExtractor:
  A handler to the newly added `DVSLayer` instance.
  """

- (features, height, width) = dvs_input_shape
+ features, height, width = dvs_input_shape
  if features > 2:
  raise ValueError(
  f"A DVSLayer istance can have a max feature dimension of 2 but {features} was given."
@@ -578,7 +578,7 @@ class GraphExtractor:
  f"A DVSLayer node exists and there are {nb_entries} entry nodes in the graph: the DVSLayer should be the only entry node."
  )

- (features, _, _) = dvs_input_shape
+ features, _, _ = dvs_input_shape

  if features > 2:
  raise ValueError(
@@ -752,7 +752,7 @@ class GraphExtractor:
  # different input sources to a core to have the same output shapes.
  if any(inp.shape != inputs[0].shape for inp in inputs):
  raise ValueError(
- f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape"
+ f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape."
  )

  # forward input through the node.
@@ -828,6 +828,7 @@ class GraphExtractor:
  if len(sources) == 0:
  return -1
  if len(sources) > 1:
+ # return -1 #TODO: why throw a runtime error when the documentation explicitly say -1 in case of more than one input node?
  raise RuntimeError(f"Node {node} has more than 1 input")
  return sources.pop()

sinabs/layers/pool2d.py CHANGED
@@ -80,7 +80,7 @@ class SpikingMaxPooling2dLayer(nn.Module):
  Returns:
  (channelsOut, height_out, width_out)
  """
- (channels, height, width) = input_shape
+ channels, height, width = input_shape

  height_out = conv_output_size(
  height + sum(self.padding[2:]), self.pool_size[0], self.strides[0]
sinabs/nir.py CHANGED
@@ -46,8 +46,8 @@ def _import_sinabs_module(
  groups=node.groups,
  bias=True,
  )
- conv.weight.data = torch.tensor(node.weight).float()
- conv.bias.data = torch.tensor(node.bias).float()
+ conv.weight.data = node.weight.detach().clone().to(float)
+ conv.bias.data = node.bias.detach().clone().to(float)
  return conv

  elif isinstance(node, nir.Conv2d):
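The weight and bias assignments now copy the NIR node's parameters with `detach().clone()` instead of re-wrapping them in `torch.tensor(...)`. A small sketch of the difference, assuming the node's weight is already a `torch.Tensor` (which the new call requires, since numpy arrays have no `detach()`); note as an aside that `.to(float)` maps Python's `float` to `torch.float64`, whereas the old `.float()` produced `float32`:

```python
# Sketch of the copy semantics; `weight` stands in for node.weight and is
# assumed to already be a torch.Tensor.
import torch

weight = torch.randn(4, 2, 3, requires_grad=True)

old_copy = torch.tensor(weight).float()       # works, but warns: prefer clone().detach()
new_copy = weight.detach().clone().to(float)  # warning-free copy, detached from autograd

assert new_copy.data_ptr() != weight.data_ptr()  # independent storage
assert not new_copy.requires_grad
print(old_copy.dtype, new_copy.dtype)  # torch.float32 torch.float64
```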
@@ -184,6 +184,7 @@ def _extract_sinabs_module(module: torch.nn.Module) -> Optional[nir.NIRNode]:
  return nir.Affine(module.weight.detach(), module.bias.detach())
  elif isinstance(module, torch.nn.Conv1d):
  return nir.Conv1d(
+ input_shape=None,
  weight=module.weight.detach(),
  stride=module.stride,
  padding=module.padding,
@@ -191,7 +192,7 @@ def _extract_sinabs_module(module: torch.nn.Module) -> Optional[nir.NIRNode]:
  groups=module.groups,
  bias=(
  module.bias.detach()
- if module.bias
+ if isinstance(module.bias, torch.Tensor)
  else torch.zeros((module.weight.shape[0]))
  ),
  )
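The `bias=` guard changes from truth-testing the parameter to an `isinstance` check. Truth-testing an `nn.Parameter` with more than one element raises a `RuntimeError` (and a one-element zero bias would evaluate to `False`), so `if module.bias` is not a reliable way to ask whether the layer was built with a bias. A short sketch of the failure mode, using a throwaway `Conv1d`:

```python
import torch

conv = torch.nn.Conv1d(in_channels=2, out_channels=4, kernel_size=3, bias=True)

try:
    bool(conv.bias)  # what `if module.bias:` does under the hood
except RuntimeError as err:
    print(err)  # "Boolean value of Tensor with more than one element is ambiguous..."

# The replacement check: True for a real bias tensor, False when bias=False
# leaves module.bias as None.
print(isinstance(conv.bias, torch.Tensor))  # True
no_bias = torch.nn.Conv1d(2, 4, 3, bias=False)
print(isinstance(no_bias.bias, torch.Tensor))  # False
```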
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sinabs
- Version: 3.1.1.dev1
+ Version: 3.1.3
  Summary: SynSense Spiking Neural Network simulator for deep neural networks (DNNs).
  Author: SynSense (formerly AiCTX)
  Author-email: support@synsense.ai
@@ -87,7 +87,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.

  Contributing to Sinabs
  ------------------------
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.


  Citation
@@ -4,7 +4,7 @@ sinabs/conversion.py,sha256=wEACb-IMrsNcjoNvGjoaQSINHYs66j0XOeQSmk7nhTM,2011
  sinabs/from_torch.py,sha256=_jVoGP3aQcdCUIc39ujxhCQmbIDlDtzLHz6VY3zmX-k,5777
  sinabs/hooks.py,sha256=u6bzKXodhUHDLi_ZaUNbD8hogRX7RD4QoRouSUaKEzY,15962
  sinabs/network.py,sha256=Zn7fQ0WTU_7vgBwfVloGINrXivZTNIBsmUGLWAbX81A,9542
- sinabs/nir.py,sha256=r72RZ2WNuhnHvQ2MaSJ04J-Bes1mAdzoU9LKbJupZzE,8695
+ sinabs/nir.py,sha256=LUUlWbV2jXJPWby5SoHFl0_c5tMVaQlsw-U_s-WjecY,8761
  sinabs/synopcounter.py,sha256=VFefA5ix0JqCr2DPf8hNB19Y0Pe8tTvSrCuspFKa4Ic,12779
  sinabs/utils.py,sha256=1ctTxNT2ONYVK5d99VhZvXwxaOG4pD2lZ_PGRPPO6bQ,12114
  sinabs/validate_memory_speck.py,sha256=WKzB3o76sddUx9griloLQ6-ijDBPqFFXshdp9AjEiAM,5002
@@ -24,19 +24,19 @@ sinabs/backend/dynapcnn/dvs_layer.py,sha256=b9OryaOv5X5ZOxGOEZhabHZf3jZoykDuB295
  sinabs/backend/dynapcnn/dynapcnn_layer.py,sha256=Pf-Qhl24LwKyu-7sJXDRYQ_3DN_7Z3MQyj3lNsGKTaE,8917
  sinabs/backend/dynapcnn/dynapcnn_layer_utils.py,sha256=9aPk9YuAgbfI_Xworfy-SbJDQSRVFksqaLU3ZaCmS74,13153
  sinabs/backend/dynapcnn/dynapcnn_network.py,sha256=klYI82qV1IqhDyD_OkOs5G3F-FysTtJJRgQbFEpDrfg,34721
- sinabs/backend/dynapcnn/dynapcnn_visualizer.py,sha256=MRewU6519dAtAMxf-JlFBrlynJTZeLiDfB0d85-mMFQ,24262
+ sinabs/backend/dynapcnn/dynapcnn_visualizer.py,sha256=OKpeVpt6OIZm7sg_N3Lww0KyqegQCFVP0iKqwXPbtYY,24354
  sinabs/backend/dynapcnn/dynapcnnnetwork_module.py,sha256=aHLTAe59h_r0rZ0bHeSuIDG811MFP6_tOtyxiZIVVfA,14487
  sinabs/backend/dynapcnn/exceptions.py,sha256=MHvsM8a5c5ukHkH59OVYU0AeivVeNgdn9Tgqb07WOtU,3523
  sinabs/backend/dynapcnn/flipdims.py,sha256=I0I1nakrF0ngWBh-2SHHg7OkCOxotqukwHOQ45GWyCs,860
- sinabs/backend/dynapcnn/io.py,sha256=8m86xm0BcLUc8AGLaA30FyY5OXFu0NrpOaPk4zLCfJU,9918
+ sinabs/backend/dynapcnn/io.py,sha256=-XtP801uVragUlBf-6PjR5UHY6B0fcy1pUg0M6QlQKo,9999
  sinabs/backend/dynapcnn/mapping.py,sha256=sI0GKh2FkyciYFJuuCLCcitOzlEjfEEEbSG_OiJXICw,8683
- sinabs/backend/dynapcnn/nir_graph_extractor.py,sha256=1PbQzDokxW_0mzG_tdMSS1toHczW-2lnKGMUkdmCug8,37724
+ sinabs/backend/dynapcnn/nir_graph_extractor.py,sha256=8_KEKzVdLTwZGN5rVqaRWH2-BK9PlTQUgCbPgcs-9_Q,37856
  sinabs/backend/dynapcnn/sinabs_edges_handler.py,sha256=pEfGUXwhQy7cnmofK5MCulSVFqMBDPRRoQVM5MTf3Qk,41085
  sinabs/backend/dynapcnn/specksim.py,sha256=UKh_lH_yHIZaYEONWsAOChrD-vBdSawBxRBeDHlSv84,17138
  sinabs/backend/dynapcnn/utils.py,sha256=QD5dKTgQmGLjP8fIz_vlXFpSem8Z-q-9zTebqd9xJzc,10036
  sinabs/backend/dynapcnn/weight_rescaling_methods.py,sha256=iftnMHZtAsPZ3wHDPPCR3VgTKjCsjNlZAIQwezWwyns,2051
  sinabs/backend/dynapcnn/chips/__init__.py,sha256=zJQ7f7bp_cF0US1pZ8ga4-3Bo32T0GB9gD2RN3uKlsM,130
- sinabs/backend/dynapcnn/chips/dynapcnn.py,sha256=oTfrBJ7ZxIaG2pNd59j3dk03QQvJWKjws6GImOX5W_0,15240
+ sinabs/backend/dynapcnn/chips/dynapcnn.py,sha256=lIFHL9pCXbaOD3Leu04b7PjuwwN_LXDiFx7p-fkS7r8,15236
  sinabs/backend/dynapcnn/chips/speck2e.py,sha256=rq6RvS-FGE5aOtTXmvkKTmIJlwGxZz4vBFQ-nV3HLCo,879
  sinabs/backend/dynapcnn/chips/speck2f.py,sha256=bWGTFtUZzeZ7peZcgO1VQG_8YGyhiTQUKUE_X9jnC5E,877
  sinabs/layers/__init__.py,sha256=lRQtECdsYJzSWKppGcg_9oCHNRwjYprgmIDbJ21Kec0,584
@@ -48,7 +48,7 @@ sinabs/layers/iaf.py,sha256=S58p29fBE0wQdFEelZq2Qmb1m7rkLnYMyCpDVql2ASY,8001
  sinabs/layers/lif.py,sha256=V41sarXbFjGcd8RhX8NZIFHPr57-8zXssW7_kKJuicU,15293
  sinabs/layers/merge.py,sha256=oVtQXdtcXRivTlwDXTEbW1Ii-GTMkg3detp0-uCTcK0,985
  sinabs/layers/neuromorphic_relu.py,sha256=z8opQf38EJAWLpR2_k8d3MT8gxYA-s8aE8Jl9J7n4Lk,1545
- sinabs/layers/pool2d.py,sha256=NaV3KmjwV1lRTFLxo8BKt6hhRxAGwSFmSxmOPt6Ng_k,3553
+ sinabs/layers/pool2d.py,sha256=9crByasYe6hJdjLkywymCT2rDKVGSw3g0xIjAwZQlz4,3551
  sinabs/layers/quantize.py,sha256=X9wKlA4nk8-_DS7WBiPHubHZQMb4OsSE0OkeFemxNCM,499
  sinabs/layers/reshape.py,sha256=mIbRoYsx3qAFK5MJtWL6wvof4KR8fKIpyIdbLv3ozPM,3347
  sinabs/layers/stateful_layer.py,sha256=5p5C6hySXor58lbUdQtmT9WfA5hrNcxzdriOH9Ct5f0,6685
@@ -56,10 +56,10 @@ sinabs/layers/to_spike.py,sha256=ORkEGuHucWCqB3P1Ia8XJsPtpDAugCTSo7Bwr3xVpzE,329
  sinabs/layers/functional/__init__.py,sha256=v0c7DHizKg8jfelmFYeMMg9vDafKvzoenakc4SPpj84,91
  sinabs/layers/functional/alif.py,sha256=ycJ7rlcBAd-lq5GCDZrcNPeV-7fztt3uy43XhBtTKHI,4599
  sinabs/layers/functional/lif.py,sha256=QRjiWDCBaJFk4J7RRMgktMaLCyN6xEXAKvC9Bu_PICU,4259
- sinabs-3.1.1.dev1.dist-info/licenses/AUTHORS,sha256=rSozYAiy4aFshSqHdw7bV-CyqtIyWhcPo26R5JR6-MY,1847
- sinabs-3.1.1.dev1.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- sinabs-3.1.1.dev1.dist-info/METADATA,sha256=TBK76i04bGarg88j-cJoYoakH9PFUqHTKJV5Vrx3KYk,3918
- sinabs-3.1.1.dev1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- sinabs-3.1.1.dev1.dist-info/pbr.json,sha256=DsGf0oO1zpKuMU-ZkpK0AxRzUJVUeBhbXdrMY2BeHvs,47
- sinabs-3.1.1.dev1.dist-info/top_level.txt,sha256=QOXGzf0ZeDjRnJ9OgAjkk6h5jrh66cwrwvtPJTyfDk8,7
- sinabs-3.1.1.dev1.dist-info/RECORD,,
+ sinabs-3.1.3.dist-info/licenses/AUTHORS,sha256=rSozYAiy4aFshSqHdw7bV-CyqtIyWhcPo26R5JR6-MY,1847
+ sinabs-3.1.3.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ sinabs-3.1.3.dist-info/METADATA,sha256=G65LwqVuathl4uuyGz4JpD1S8CXFqHKcq8-MsftZdSQ,3910
+ sinabs-3.1.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ sinabs-3.1.3.dist-info/pbr.json,sha256=OEWdbkD4Q8LvbGlpU0R5QyaFrHAjL8tC1RIlE9qpspc,47
+ sinabs-3.1.3.dist-info/top_level.txt,sha256=QOXGzf0ZeDjRnJ9OgAjkk6h5jrh66cwrwvtPJTyfDk8,7
+ sinabs-3.1.3.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -0,0 +1 @@
+ {"git_version": "d84078e", "is_release": false}
@@ -1 +0,0 @@
- {"git_version": "dbf7eee", "is_release": false}