sinabs 3.0.4.dev25.tar.gz → 3.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/AUTHORS +1 -0
  2. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/PKG-INFO +3 -2
  3. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/README.md +1 -1
  4. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/about/differences.md +2 -2
  5. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/about/release_notes.md +18 -0
  6. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/conf.py +0 -1
  7. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/contact.md +1 -1
  8. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/utils.py +7 -1
  9. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/quickstart.ipynb +2 -8
  10. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/how_tos/activations.ipynb +3 -1
  11. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/how_tos/custom_hooks.ipynb +6 -2
  12. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/how_tos/synops_loss_ann.ipynb +27 -11
  13. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/how_tos/synops_loss_snn.ipynb +21 -13
  14. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/available_network_arch.md +140 -11
  15. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/device_management.md +1 -1
  16. sinabs-3.1.1/docs/speck/faqs/imgs/network-with-merge-and-split.png +0 -0
  17. sinabs-3.1.1/docs/speck/faqs/imgs/two-independent-networks.png +0 -0
  18. sinabs-3.1.1/docs/speck/faqs/imgs/two-networks-merging-output.png +0 -0
  19. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/tips_for_training.md +3 -3
  20. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/leak_neuron.ipynb +10 -7
  21. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/nmnist_quick_start.ipynb +148 -54
  22. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/play_with_speck_dvs.ipynb +71 -57
  23. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/power_monitoring.ipynb +51 -38
  24. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/using_readout_layer.ipynb +74 -46
  25. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/visualize_speck_dvs_input.ipynb +23 -21
  26. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/notebooks/visualize_spike_count.ipynb +62 -35
  27. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/overview.md +7 -7
  28. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/visualizer.md +2 -2
  29. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/LeNet_5_EngChinese.ipynb +1 -1
  30. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/bptt.ipynb +0 -1
  31. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/nir_to_speck.ipynb +29 -21
  32. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/nmnist.ipynb +30 -20
  33. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/weight_transfer_mnist.ipynb +5 -1
  34. sinabs-3.1.1/examples/dynapcnn_network/snn_DVSLayer_given.ipynb +337 -0
  35. sinabs-3.1.1/examples/dynapcnn_network/snn_DVSLayer_given_followed_by_pool.ipynb +341 -0
  36. sinabs-3.1.1/examples/dynapcnn_network/snn_deployment.ipynb +882 -0
  37. sinabs-3.1.1/examples/dynapcnn_network/snn_need_create_DVSLayer.ipynb +333 -0
  38. sinabs-3.1.1/examples/dynapcnn_network/snn_no_DVSLayer.ipynb +324 -0
  39. sinabs-3.1.1/examples/dynapcnn_network/snn_with_batchnorm.ipynb +300 -0
  40. sinabs-3.1.1/examples/dynapcnn_network/snn_with_multiple_batchnorm.ipynb +380 -0
  41. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/requirements.txt +1 -1
  42. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/activation/reset_mechanism.py +3 -3
  43. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/activation/surrogate_gradient_fn.py +4 -4
  44. sinabs-3.1.1/sinabs/backend/dynapcnn/__init__.py +6 -0
  45. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chip_factory.py +33 -61
  46. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
  47. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
  48. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
  49. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/config_builder.py +39 -59
  50. sinabs-3.1.1/sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
  51. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/discretize.py +91 -155
  52. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dvs_layer.py +59 -101
  53. sinabs-3.1.1/sinabs/backend/dynapcnn/dynapcnn_layer.py +270 -0
  54. sinabs-3.1.1/sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
  55. sinabs-3.1.1/sinabs/backend/dynapcnn/dynapcnn_network.py +784 -0
  56. sinabs-3.1.1/sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
  57. sinabs-3.1.1/sinabs/backend/dynapcnn/exceptions.py +138 -0
  58. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/io.py +55 -92
  59. sinabs-3.1.1/sinabs/backend/dynapcnn/mapping.py +231 -0
  60. sinabs-3.1.1/sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
  61. sinabs-3.1.1/sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
  62. sinabs-3.1.1/sinabs/backend/dynapcnn/utils.py +294 -0
  63. sinabs-3.1.1/sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
  64. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/conversion.py +2 -2
  65. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/from_torch.py +23 -1
  66. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/hooks.py +38 -41
  67. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/alif.py +16 -16
  68. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/crop2d.py +2 -2
  69. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/exp_leak.py +1 -1
  70. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/iaf.py +11 -11
  71. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/lif.py +9 -9
  72. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/neuromorphic_relu.py +9 -8
  73. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/pool2d.py +5 -5
  74. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/quantize.py +1 -1
  75. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/stateful_layer.py +10 -7
  76. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/to_spike.py +9 -9
  77. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/network.py +14 -12
  78. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/nir.py +4 -3
  79. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/synopcounter.py +10 -7
  80. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/utils.py +155 -7
  81. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/validate_memory_speck.py +0 -5
  82. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/PKG-INFO +3 -2
  83. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/SOURCES.txt +39 -0
  84. sinabs-3.1.1/sinabs.egg-info/pbr.json +1 -0
  85. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/requires.txt +1 -0
  86. sinabs-3.1.1/tests/__init__.py +0 -0
  87. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/hw_utils.py +10 -5
  88. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_auto_mapping.py +1 -1
  89. sinabs-3.1.1/tests/test_dynapcnn/test_compatible_layer_build.py +41 -0
  90. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_config_making.py +18 -7
  91. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_device_movement.py +0 -1
  92. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_discover_device.py +0 -12
  93. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_doorbell.py +24 -5
  94. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_dvs_input.py +51 -27
  95. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_dvs_layer.py +0 -31
  96. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_individual_cases.py +12 -20
  97. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_large_net.py +22 -9
  98. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_learning.py +1 -0
  99. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_monitoring.py +14 -12
  100. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_neuron_leak.py +4 -2
  101. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_single_neuron_hardware.py +4 -3
  102. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_speck2e.py +2 -2
  103. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_visualizer.py +27 -26
  104. sinabs-3.1.1/tests/test_dynapcnnlayer/__init__.py +0 -0
  105. sinabs-3.1.1/tests/test_dynapcnnlayer/conftest_dynapcnnlayer.py +16 -0
  106. sinabs-3.1.1/tests/test_dynapcnnlayer/model_dummy_1.py +194 -0
  107. sinabs-3.1.1/tests/test_dynapcnnlayer/model_dummy_2.py +255 -0
  108. sinabs-3.1.1/tests/test_dynapcnnlayer/model_dummy_3.py +321 -0
  109. sinabs-3.1.1/tests/test_dynapcnnlayer/model_dummy_4.py +228 -0
  110. sinabs-3.1.1/tests/test_dynapcnnlayer/test_dynapcnnlayer.py +69 -0
  111. sinabs-3.1.1/tests/test_dynapcnnnetwork/__init__.py +0 -0
  112. sinabs-3.1.1/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py +32 -0
  113. sinabs-3.1.1/tests/test_dynapcnnnetwork/model_dummy_1.py +118 -0
  114. sinabs-3.1.1/tests/test_dynapcnnnetwork/model_dummy_2.py +161 -0
  115. sinabs-3.1.1/tests/test_dynapcnnnetwork/model_dummy_3.py +188 -0
  116. sinabs-3.1.1/tests/test_dynapcnnnetwork/model_dummy_4.py +186 -0
  117. sinabs-3.1.1/tests/test_dynapcnnnetwork/model_dummy_seq.py +73 -0
  118. sinabs-3.1.1/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py +60 -0
  119. sinabs-3.1.1/tests/test_dynapcnnnetwork/test_failcases.py +94 -0
  120. sinabs-3.1.1/tests/test_graph_extractor/conftest_graph_extractor.py +19 -0
  121. sinabs-3.1.1/tests/test_graph_extractor/model_dummy_1.py +151 -0
  122. sinabs-3.1.1/tests/test_graph_extractor/model_dummy_2.py +200 -0
  123. sinabs-3.1.1/tests/test_graph_extractor/model_dummy_3.py +235 -0
  124. sinabs-3.1.1/tests/test_graph_extractor/model_dummy_4.py +192 -0
  125. sinabs-3.1.1/tests/test_graph_extractor/test_graph_extractor.py +62 -0
  126. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_hooks.py +1 -1
  127. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_maxpooling.py +0 -1
  128. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_nir.py +15 -0
  129. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/__init__.py +0 -5
  130. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/dynapcnn_layer.py +0 -204
  131. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/dynapcnn_network.py +0 -507
  132. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/exceptions.py +0 -19
  133. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/mapping.py +0 -195
  134. sinabs-3.0.4.dev25/sinabs/backend/dynapcnn/utils.py +0 -539
  135. sinabs-3.0.4.dev25/sinabs.egg-info/pbr.json +0 -1
  136. sinabs-3.0.4.dev25/tests/test_dynapcnn/test_compatible_layer_build.py +0 -197
  137. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/.coveragerc +0 -0
  138. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/.github/workflows/ci-pipeline.yml +0 -0
  139. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/.pre-commit-config.yaml +0 -0
  140. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/.readthedocs.yaml +0 -0
  141. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/CITATION.cff +0 -0
  142. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/LICENSE +0 -0
  143. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/codecov.yml +0 -0
  144. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/Makefile +0 -0
  145. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/dataflow_layers.png +0 -0
  146. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/event_preprocessing_pipeline.png +0 -0
  147. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/memory_constraints.png +0 -0
  148. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/sinabs-dynapcnn-role.png +0 -0
  149. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/speck_dynapcnn.png +0 -0
  150. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/Overview/speck_top_level.png +0 -0
  151. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/devkits_images/dynapcnn_devkit.png +0 -0
  152. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/devkits_images/speck_devkit.png +0 -0
  153. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/devkits_images/speck_module.png +0 -0
  154. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/devkits_images/speck_module_devkit.png +0 -0
  155. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/dvs_input_flow.png +0 -0
  156. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/dynapcnn_visualizer.png +0 -0
  157. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/spike_input_flow.png +0 -0
  158. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/power_monitoring/dynamic_power_samna_graph.png +0 -0
  159. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/power_monitoring/idle_power_samna_graph.png +0 -0
  160. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/power_monitoring/power_plot.png +0 -0
  161. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/sinabs-logo-lowercase-whitebg.png +0 -0
  162. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/sinabs-logo-lowercase.png +0 -0
  163. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/tips_for_training/exceeding_bandwidth.png +0 -0
  164. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/using_readout_layer/handcraft_weights.png +0 -0
  165. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/using_readout_layer/neuron_id_mismatch.png +0 -0
  166. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/using_readout_layer/readout_layer.png +0 -0
  167. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/using_readout_layer/samna_graph.png +0 -0
  168. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/visualize_speck_dvs/samna_graph.png +0 -0
  169. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/visualize_spike_count/samna_graph.png +0 -0
  170. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_static/visualize_spike_count/spike_count.png +0 -0
  171. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_templates/class_activation.rst +0 -0
  172. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/_templates/class_layer.rst +0 -0
  173. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/about/about.rst +0 -0
  174. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/about/contributing.md +0 -0
  175. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/about/info.md +0 -0
  176. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/activation.rst +0 -0
  177. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/api.rst +0 -0
  178. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/from_torch.rst +0 -0
  179. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/hooks.rst +0 -0
  180. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/layers.rst +0 -0
  181. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/network.rst +0 -0
  182. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/nir.rst +0 -0
  183. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/synopcounter.rst +0 -0
  184. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/api/utils.rst +0 -0
  185. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/README.rst +0 -0
  186. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/README.rst +0 -0
  187. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/plot_alif.py +0 -0
  188. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/plot_exp_leaky.py +0 -0
  189. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/plot_iaf.py +0 -0
  190. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/layers/plot_lif.py +0 -0
  191. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/spike_fns/README.rst +0 -0
  192. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_maxspike.py +0 -0
  193. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_multispike.py +0 -0
  194. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_singlespike.py +0 -0
  195. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/README.rst +0 -0
  196. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_gaussian.py +0 -0
  197. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_heaviside.py +0 -0
  198. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_multigaussian.py +0 -0
  199. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_periodicexponential.py +0 -0
  200. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_singleexponential.py +0 -0
  201. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/fundamentals.rst +0 -0
  202. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/getting_started.rst +0 -0
  203. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/iaf_neuron_model.ipynb +0 -0
  204. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/install.rst +0 -0
  205. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/getting_started/python_pyenv_pipenv.rst +0 -0
  206. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/how_tos/how_tos.rst +0 -0
  207. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/index.md +0 -0
  208. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/make.bat +0 -0
  209. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/plugins/plugins.rst +0 -0
  210. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/requirements.txt +0 -0
  211. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/advanced_concepts.rst +0 -0
  212. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/chip_factory.rst +0 -0
  213. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/config_builder.rst +0 -0
  214. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/crop2d.rst +0 -0
  215. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/discretize.rst +0 -0
  216. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dvs_layer.rst +0 -0
  217. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn.rst +0 -0
  218. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_layer.rst +0 -0
  219. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_network.rst +0 -0
  220. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_visualizer.rst +0 -0
  221. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/exceptions.rst +0 -0
  222. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/flipdims.rst +0 -0
  223. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/io.rst +0 -0
  224. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/mapping.rst +0 -0
  225. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/specksim.rst +0 -0
  226. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/api/dynapcnn/utils.rst +0 -0
  227. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/dangers.md +0 -0
  228. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/add_new_device.md +0 -0
  229. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/available_algorithmic_operation.md +0 -0
  230. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/chip_errata.md +0 -0
  231. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/index.rst +0 -0
  232. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/output_monitoring.md +0 -0
  233. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/faqs/save_hardware_config_as_binary.md +0 -0
  234. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/index.rst +0 -0
  235. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/specksim.md +0 -0
  236. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/the_basics.md +0 -0
  237. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/speck/tutorials.rst +0 -0
  238. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/scnn_mnist.nir +0 -0
  239. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/tutorials.rst +0 -0
  240. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/docs/tutorials/weight_scaling.md +0 -0
  241. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/mnist/dynapcnn_network.py +0 -0
  242. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/mnist/mnist_params.pt +0 -0
  243. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/mnist/specksim_network.py +0 -0
  244. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/dvs_gesture_params.pt +0 -0
  245. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/gesture_viz.py +0 -0
  246. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/01_armroll.png +0 -0
  247. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/02_handclap.png +0 -0
  248. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/03_lefthandclockwise.png +0 -0
  249. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/04_lefthandcounterclockwise.png +0 -0
  250. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/05_lefthandwave.png +0 -0
  251. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/06_righthandwave.png +0 -0
  252. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/07_righthandclockwise.png +0 -0
  253. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/08_righthandcounterclockwise.png +0 -0
  254. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/09_airdrums.png +0 -0
  255. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/10_airguitar.png +0 -0
  256. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/examples/visualizer/icons/11_other.png +0 -0
  257. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/jupyterlab-requirements.txt +0 -0
  258. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/pull_request_template.md +0 -0
  259. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/setup.cfg +0 -0
  260. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/setup.py +0 -0
  261. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/__init__.py +0 -0
  262. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/activation/__init__.py +0 -0
  263. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/activation/quantize.py +0 -0
  264. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/activation/spike_generation.py +0 -0
  265. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/__init__.py +0 -0
  266. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/__init__.py +0 -0
  267. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/crop2d.py +0 -0
  268. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnn_visualizer.py +0 -0
  269. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/flipdims.py +0 -0
  270. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/backend/dynapcnn/specksim.py +0 -0
  271. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/cnnutils.py +0 -0
  272. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/__init__.py +0 -0
  273. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/channel_shift.py +0 -0
  274. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/functional/__init__.py +0 -0
  275. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/functional/alif.py +0 -0
  276. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/functional/lif.py +0 -0
  277. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/merge.py +0 -0
  278. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs/layers/reshape.py +0 -0
  279. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/dependency_links.txt +0 -0
  280. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/not-zip-safe +0 -0
  281. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/sinabs.egg-info/top_level.txt +0 -0
  282. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/conv_input.pth +0 -0
  283. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/conv_layer_synops.pth +0 -0
  284. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/firing_rates.pth +0 -0
  285. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/firing_rates_per_neuron.pth +0 -0
  286. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/input_diffs.pth +0 -0
  287. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/inputs_and_results/hooks/model_synops.pth +0 -0
  288. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/mnist_params.pt +0 -0
  289. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/models/README.txt +0 -0
  290. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/models/synop_hook_model.pth +0 -0
  291. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/requirements.txt +0 -0
  292. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_activations.py +0 -0
  293. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_batch_mismatch.py +0 -0
  294. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_batch_size_update.py +0 -0
  295. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_conversion.py +0 -0
  296. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_copy.py +0 -0
  297. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/custom_jit_filters.py +0 -0
  298. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_device_name_mapping.py +0 -0
  299. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_discretized.py +0 -0
  300. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_dynapcnn/test_event_conversion.py +0 -0
  301. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_from_model.py +0 -0
  302. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_alif.py +0 -0
  303. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_channelshift.py +0 -0
  304. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_crop2d.py +0 -0
  305. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_exp_leak.py +0 -0
  306. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_iaf.py +0 -0
  307. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_img2spk.py +0 -0
  308. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_lif.py +0 -0
  309. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_merge.py +0 -0
  310. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_neuromorphic_relu.py +0 -0
  311. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_reshaping.py +0 -0
  312. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_sig2spk.py +0 -0
  313. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_layers/test_stateful_layer.py +0 -0
  314. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_network_class.py +0 -0
  315. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_normalize_weights.py +0 -0
  316. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_onnx.py +0 -0
  317. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_quantize.py +0 -0
  318. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_specksim/test_specksim_bindings.py +0 -0
  319. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_specksim/test_specksim_conversion.py +0 -0
  320. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_specksim/test_specksim_network.py +0 -0
  321. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_surrogate_gradients.py +0 -0
  322. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_synops_counter.py +0 -0
  323. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/test_utils.py +0 -0
  324. {sinabs-3.0.4.dev25 → sinabs-3.1.1}/tests/weights/README.txt +0 -0
--- a/AUTHORS
+++ b/AUTHORS
@@ -22,6 +22,7 @@ Vanessa Leite <vanessa.leite@synsense.ai>
  Vanessa Leite <vanessa@ini.uzh.ch>
  Vanessa Leite <vanessinhaleite.cp.ufma@gmail.com>
  Vanessa Leite <vrcleite@gmail.com>
+ Willian Girao <williansoaresgirao@gmail.com>
  Willian-Girao <williansoaresgirao@gmail.com>
  Yalun Hu <yalun.hu@synsense.ai>
  Yalun_Hu <yalun.hu@synsense.ai>
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sinabs
- Version: 3.0.4.dev25
+ Version: 3.1.1
  Summary: SynSense Spiking Neural Network simulator for deep neural networks (DNNs).
  Author: SynSense (formerly AiCTX)
  Author-email: support@synsense.ai
@@ -23,6 +23,7 @@ Requires-Dist: torch>=1.8
  Requires-Dist: nir<=1.0.4
  Requires-Dist: nirtorch
  Requires-Dist: samna>=0.33
+ Requires-Dist: matplotlib
  Dynamic: author
  Dynamic: author-email
  Dynamic: classifier
@@ -86,7 +87,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.

  Contributing to Sinabs
  ------------------------
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.


  Citation
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.

  Contributing to Sinabs
  ------------------------
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.


  Citation
--- a/docs/about/differences.md
+++ b/docs/about/differences.md
@@ -1,8 +1,8 @@
  How is Sinabs different?
  ========================

- There are many SNN simulators out there. What does Sinabs do differently? Sinabs is meant to extend PyTorch by adding stateful and spiking layers, which can then take full advantage of the optimised, gradient-based training mechanisms. You might find that the design of our sequential models differs slightly from that of recurrent layers in PyTorch, as we do not pass the state as an input or receive it as output. We do this so that we can be compatible with the nn.Sequential architecture, without having to manually define the flow of tensors between layers.
+ There are many SNN simulators out there. What does Sinabs do differently? Sinabs is meant to extend PyTorch by adding stateful and spiking layers, which can then take full advantage of the optimised, gradient-based training mechanisms. You might find that the design of our sequential models differs slightly from that of recurrent layers in PyTorch, as we do not pass the state as an input or receive it as output. We do this so that we can be compatible with the nn.Sequential architecture, without having to manually define the flow of tensors between layers.

  What is the difference between Rockpool and Sinabs?
  ---------------------------------------------------
- Rockpool provides multiple computational backends such as Jax, Nest or PyTorch and wraps its own api around it! That allows for powerful abstractions and many additional features such as graph tracing, support for continuous time systems and neuromorphic hardware. Sinabs on the other hand focuses on simplicity: Built exclusively on PyTorch, it's meant to be a thin layer that adds support for spiking layers that are not part of PyTorch. Traditionally, Sinabs added support for SynSense's convolutional neuromorphic hardware (Dynap-CNN), while Rockpool focuses on SynSense's hardware for lower-dimensional signals (Xylo). You can read about both hardware architectures [here](https://www.synsense-neuromorphic.com/technology). That means that Sinabs comes with built-in weight transfer functionality which converts a pre-trained ANN to an SNN, because vision models often have strong spatial dependencies. Rockpool on the other hand adds support for analog audio frontends and exact hardware simulation in software.
+ Rockpool provides multiple computational backends such as Jax, Nest or PyTorch and wraps its own api around it! That allows for powerful abstractions and many additional features such as graph tracing, support for continuous time systems and neuromorphic hardware. Sinabs on the other hand focuses on simplicity: Built exclusively on PyTorch, it's meant to be a thin layer that adds support for spiking layers that are not part of PyTorch. Traditionally, Sinabs added support for SynSense's convolutional neuromorphic hardware (Dynap-CNN), while Rockpool focuses on SynSense's hardware for lower-dimensional signals (Xylo). You can read about both hardware architectures [here](https://www.synsense.ai/products/speck-2/). That means that Sinabs comes with built-in weight transfer functionality which converts a pre-trained ANN to an SNN, because vision models often have strong spatial dependencies. Rockpool on the other hand adds support for analog audio frontends and exact hardware simulation in software.
--- a/docs/about/release_notes.md
+++ b/docs/about/release_notes.md
@@ -1,5 +1,23 @@
  # Release notes

+ ## Unreleased
+
+ * Fix NIR export of Conv1d layer which expected an input_shape parameter.
+ * Fix broken link on documentation.
+ * Fix missing plots on `Neuron Models` gallery.
+ * Fix `open_device` function that would thrown an error if an unsupported SynSense device was connected to the computer.
+
+ ## v3.1.0 (31/10/2025)
+
+ * Add mapping of non-sequential networks:
+   * Now, layers in Sinabs models can receive inputs from and send outputs to multiple layers.
+ * Deprecate `chip_layers_ordering` from DynapCNNNetwork. Use `layer2core_map` instead.
+ * Deprecate `DynapcnnCompatibleNetwork`. Use `DynapcnnNetwork` instead.
+ * Deprecate `merge_conv_bn` from `sinabs.utils`. Use `merge_bn` instead.
+ * Fix tests crashing in case an unexpected board was found.
+ * Fix tests that were not running for speck2fdevkit.
+ * Fix error when initializing sinabs without matplotlib: matplotlib is not a sinabs dependency.
+
  ## v3.0.4 (08/09/2025)

  * Update sinabs code to be consistent with Python 3.12 and Numpy > 2.0.
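The v3.1.0 deprecations above amount to a rename at deployment time. A minimal migration sketch, assuming a `DynapcnnNetwork` built from a Sinabs SNN as in the notebooks further down; `layer2core_map` replaces `chip_layers_ordering` in the same call, and the particular dict below is a hypothetical core assignment:

```python
from sinabs.backend.dynapcnn import DynapcnnNetwork

# `snn` is any Sinabs-compatible spiking model; the input shape is illustrative.
dynapcnn = DynapcnnNetwork(snn=snn, input_shape=(2, 34, 34), discretize=True)

# Deprecated since v3.1.0:
# dynapcnn.to(device="speck2fmodule", chip_layers_ordering=[0, 1, 2])

# Replacement: explicitly map each DynapcnnLayer index to a chip core index.
dynapcnn.to(device="speck2fmodule", layer2core_map={0: 0, 1: 1, 2: 2})
```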
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -31,7 +31,6 @@ sphinx_gallery_conf = {
      # "backreferences_dir": "gen_modules/backreferences",
      "doc_module": ("sinabs",),
      "download_all_examples": False,
-     "ignore_pattern": r"utils\.py",
  }

  # Napoleon settings
--- a/docs/contact.md
+++ b/docs/contact.md
@@ -1,5 +1,5 @@
  # **CONTACT US**

- Are you using Sinabs to train your networks? Do you have access to SynSense hardware and need help figuring something out? Found a bug? It's normally easiest to reach SynSense developers on the [SynSense Discord](https://discord.gg/V6FHBZURkg) server. There we have dedicated channels regarding Sinabs and our hardware.
+ Are you using Sinabs to train your networks? Do you have access to SynSense hardware and need help figuring something out? Found a bug? It's normally easiest to reach SynSense developers on the [SynSense Discord](https://discord.gg/9yY3uyJV) server. There we have dedicated channels regarding Sinabs and our hardware.

  If you don't have Discord, you can also start a discussion on [Github](https://github.com/synsense/sinabs/discussions)!
--- a/docs/gallery/layers/utils.py
+++ b/docs/gallery/layers/utils.py
@@ -1,8 +1,14 @@
+ """
+ ==============================
+ Utility Function
+ ==============================
+ Utility function for plots in the documentation.
+ """
+
  import matplotlib.pyplot as plt
  import torch

  import sinabs
- import sinabs.layers as sl


  def plot_evolution(neuron_model: sinabs.layers, input: torch.Tensor):
--- a/docs/getting_started/quickstart.ipynb
+++ b/docs/getting_started/quickstart.ipynb
@@ -28,15 +28,9 @@
  "\n",
  "model = nn.Sequential(\n",
  "    nn.Linear(16, 64),\n",
- "    sl.LIF(\n",
- "        tau_mem=10.0,\n",
- "        surrogate_grad_fn=sinabs.activation.SingleExponential()\n",
- "    ),\n",
+ "    sl.LIF(tau_mem=10.0, surrogate_grad_fn=sinabs.activation.SingleExponential()),\n",
  "    nn.Linear(64, 4),\n",
- "    sl.LIF(\n",
- "        tau_mem=10.0,\n",
- "        surrogate_grad_fn=sinabs.activation.SingleExponential()\n",
- "    ),\n",
+ "    sl.LIF(tau_mem=10.0, surrogate_grad_fn=sinabs.activation.SingleExponential()),\n",
  ")"
  ]
  },
--- a/docs/how_tos/activations.ipynb
+++ b/docs/how_tos/activations.ipynb
@@ -258,7 +258,9 @@
  "x = torch.linspace(-0.5, 3.5, 500)\n",
  "plt.plot(x, sina.Heaviside(window=0.5)(v_mem=x, spike_threshold=1.0), label=\"Heaviside\")\n",
  "plt.plot(x, sina.MultiGaussian()(v_mem=x, spike_threshold=1.0), label=\"MultiGaussian\")\n",
- "plt.plot(x, sina.SingleExponential()(v_mem=x, spike_threshold=1.0), label=\"SingleExponential\")\n",
+ "plt.plot(\n",
+ "    x, sina.SingleExponential()(v_mem=x, spike_threshold=1.0), label=\"SingleExponential\"\n",
+ ")\n",
  "\n",
  "plt.xlabel(\"Membrane potential v_mem\")\n",
  "plt.ylabel(\"derivative of output activation\")\n",
--- a/docs/how_tos/custom_hooks.ipynb
+++ b/docs/how_tos/custom_hooks.ipynb
@@ -27,6 +27,7 @@
  "from sinabs import layers as sl\n",
  "import sinabs.hooks\n",
  "\n",
+ "\n",
  "# - Define SNN\n",
  "class SNN(nn.Sequential):\n",
  "    def __init__(self, batch_size):\n",
@@ -46,6 +47,7 @@
  "            sl.UnflattenTime(batch_size=batch_size),\n",
  "        )\n",
  "\n",
+ "\n",
  "batch_size = 5\n",
  "snn = SNN(batch_size=batch_size)"
  ]
@@ -162,7 +164,7 @@
  ],
  "source": [
  "# Forward pass\n",
- "rand_input_spikes = (torch.ones((batch_size, 10, 1, 28, 28)) ).float()\n",
+ "rand_input_spikes = (torch.ones((batch_size, 10, 1, 28, 28))).float()\n",
  "snn(rand_input_spikes)\n",
  "\n",
  "# Access and print hook data\n",
@@ -170,7 +172,9 @@
  "    if hasattr(layer, \"hook_data\"):\n",
  "        print(f\"Layer {idx}:\")\n",
  "        print(f\"\\tBatch size: {layer.hook_data['batch_size']}\")\n",
- "        print(f\"\\tShape: {layer.hook_data['neuron_shape']} - {layer.hook_data['num_neurons']} neurons in total\")"
+ "        print(\n",
+ "            f\"\\tShape: {layer.hook_data['neuron_shape']} - {layer.hook_data['num_neurons']} neurons in total\"\n",
+ "        )"
  ]
  },
  {
--- a/docs/how_tos/synops_loss_ann.ipynb
+++ b/docs/how_tos/synops_loss_ann.ipynb
@@ -49,7 +49,9 @@
  "from torchvision import datasets, transforms\n",
  "\n",
  "to_tensor = transforms.ToTensor()\n",
- "mnist_train = datasets.MNIST(\"../tutorials/data\", train=True, transform=lambda x: to_tensor(x)*255)\n",
+ "mnist_train = datasets.MNIST(\n",
+ "    \"../tutorials/data\", train=True, transform=lambda x: to_tensor(x) * 255\n",
+ ")\n",
  "train_loader = DataLoader(mnist_train, batch_size=512, shuffle=True, num_workers=4)"
  ]
  },
@@ -162,8 +164,14 @@
  "        output = ann(data)\n",
  "        optim.zero_grad()\n",
  "\n",
- "        model_loss = nn.functional.cross_entropy(output, target) \n",
- "        synops_loss = synops_loss_weight * nn.functional.mse_loss(ann.hook_data[\"total_synops_per_timestep\"], target_synops) / 1e7\n",
+ "        model_loss = nn.functional.cross_entropy(output, target)\n",
+ "        synops_loss = (\n",
+ "            synops_loss_weight\n",
+ "            * nn.functional.mse_loss(\n",
+ "                ann.hook_data[\"total_synops_per_timestep\"], target_synops\n",
+ "            )\n",
+ "            / 1e7\n",
+ "        )\n",
  "        loss = model_loss + synops_loss\n",
  "        loss.backward()\n",
  "        optim.step()\n",
@@ -197,14 +205,19 @@
  "source": [
  "import pandas as pd\n",
  "import seaborn as sns\n",
+ "\n",
  "sns.set(style=\"whitegrid\")\n",
  "\n",
- "df = pd.DataFrame({\n",
- "    'Loss type': ['Prediction'] * len(model_losses) + ['Synops'] * len(synops_losses),\n",
- "    'Loss' : model_losses + synops_losses,\n",
- "    'Training step' : list(range(len(model_losses))) + list(range(len(synops_losses)))\n",
- "})\n",
- "fig = sns.lineplot(data=df, x='Training step', y='Loss', hue='Loss type')\n",
+ "df = pd.DataFrame(\n",
+ "    {\n",
+ "        \"Loss type\": [\"Prediction\"] * len(model_losses)\n",
+ "        + [\"Synops\"] * len(synops_losses),\n",
+ "        \"Loss\": model_losses + synops_losses,\n",
+ "        \"Training step\": list(range(len(model_losses)))\n",
+ "        + list(range(len(synops_losses))),\n",
+ "    }\n",
+ ")\n",
+ "fig = sns.lineplot(data=df, x=\"Training step\", y=\"Loss\", hue=\"Loss type\")\n",
  "fig.set_yscale(\"log\");"
  ]
  },
@@ -221,7 +234,9 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "mnist_test = datasets.MNIST(\"../tutorials/data\", train=False, transform=lambda x: to_tensor(x)*255)\n",
+ "mnist_test = datasets.MNIST(\n",
+ "    \"../tutorials/data\", train=False, transform=lambda x: to_tensor(x) * 255\n",
+ ")\n",
  "test_loader = DataLoader(mnist_test, batch_size=512, num_workers=4)"
  ]
  },
@@ -243,7 +258,8 @@
  ],
  "source": [
  "from torchmetrics.classification import MulticlassAccuracy\n",
- "metric = MulticlassAccuracy(num_classes=10, average='micro').to(device)\n",
+ "\n",
+ "metric = MulticlassAccuracy(num_classes=10, average=\"micro\").to(device)\n",
  "\n",
  "accuracies = []\n",
  "for data, targets in iter(test_loader):\n",
--- a/docs/how_tos/synops_loss_snn.ipynb
+++ b/docs/how_tos/synops_loss_snn.ipynb
@@ -24,6 +24,7 @@
  "# Seed for reproducibility\n",
  "torch.manual_seed(1)\n",
  "\n",
+ "\n",
  "class SNN(nn.Sequential):\n",
  "    def __init__(self, batch_size):\n",
  "        super().__init__(\n",
@@ -42,6 +43,7 @@
  "            sl.UnflattenTime(batch_size=batch_size),\n",
  "        )\n",
  "\n",
+ "\n",
  "batch_size = 5\n",
  "snn = SNN(batch_size=batch_size)"
  ]
@@ -79,7 +81,7 @@
  "    layer.register_forward_hook(firing_rate_per_neuron_hook)\n",
  "\n",
  "# Forward pass to record the number of synaptic operations\n",
- "rand_input_spikes = (torch.ones((batch_size, 10, 1, 28, 28)) ).float()\n",
+ "rand_input_spikes = (torch.ones((batch_size, 10, 1, 28, 28))).float()\n",
  "snn(rand_input_spikes)\n",
  "\n",
  "print(\"Synops after feeding input:\")\n",
@@ -112,7 +114,7 @@
  ],
  "source": [
  "print(\"Per layer:\")\n",
- "for layer_idx, synops in snn.hook_data['synops_per_timestep'].items():\n",
+ "for layer_idx, synops in snn.hook_data[\"synops_per_timestep\"].items():\n",
  "    print(f\"- Layer {layer_idx}: {synops:.2e} SynOps per step\")"
  ]
  },
@@ -210,25 +212,28 @@
  "source": [
  "# Helper functions to calculate the SynOp loss for each layer\n",
  "\n",
+ "\n",
  "def get_synops_squared_error(synops, target_synops):\n",
- "    \"\"\" SynOps loss term for individual layer:\n",
+ "    \"\"\"SynOps loss term for individual layer:\n",
  "    Squared error on anything above the target\n",
  "    \"\"\"\n",
  "    # Use ReLU so that only values above taret are punished\n",
  "    above_target = nn.functional.relu(synops - target_synops)\n",
  "    # Return squared error\n",
  "    return above_target**2\n",
- "    \n",
+ "\n",
+ "\n",
  "def get_synops_loss(snn, target_synops):\n",
- "    \"\"\" Sum of synop losses of individual layers\"\"\"\n",
+ "    \"\"\"Sum of synop losses of individual layers\"\"\"\n",
  "    loss = sum(\n",
  "        get_synops_squared_error(synops, target_synops)\n",
  "        for synops in snn.hook_data[\"synops_per_timestep\"].values()\n",
  "    )\n",
  "    return loss\n",
  "\n",
+ "\n",
  "def get_firing_rate_squared_error(firing_rates, target_rate):\n",
- "    \"\"\" Firing rate loss term for individual layer:\n",
+ "    \"\"\"Firing rate loss term for individual layer:\n",
  "    Squared error on anything below the target\n",
  "    \"\"\"\n",
  "    # Use ReLU so that only values below target are punished\n",
@@ -236,11 +241,14 @@
  "    # Return sum of squared errors\n",
  "    return (below_target**2).sum()\n",
  "\n",
+ "\n",
  "def get_firing_rate_loss(snn, target_rate):\n",
- "    \"\"\" Sum of firing rate losses of individual layers\"\"\"\n",
+ "    \"\"\"Sum of firing rate losses of individual layers\"\"\"\n",
  "    loss = sum(\n",
- "        get_firing_rate_squared_error(layer.hook_data[\"firing_rate_per_neuron\"], target_rate)\n",
- "        for layer in snn \n",
+ "        get_firing_rate_squared_error(\n",
+ "            layer.hook_data[\"firing_rate_per_neuron\"], target_rate\n",
+ "        )\n",
+ "        for layer in snn\n",
  "        if hasattr(layer, \"hook_data\") and \"firing_rate_per_neuron\" in layer.hook_data\n",
  "    )\n",
  "    return loss"
@@ -256,7 +264,7 @@
  "target_synops = 1e5\n",
  "target_firing_rate = 0.5\n",
  "synops_loss_weight = 0.01\n",
- "rate_loss_weight = 1.\n",
+ "rate_loss_weight = 1.0\n",
  "\n",
  "optim = torch.optim.Adam(snn.parameters(), lr=1e-3)\n",
  "\n",
@@ -265,9 +273,9 @@
  "    sinabs.reset_states(snn)\n",
  "    sinabs.zero_grad(snn)\n",
  "    optim.zero_grad()\n",
- "    \n",
+ "\n",
  "    snn(rand_input_spikes)\n",
- "    \n",
+ "\n",
  "    synop_loss = synops_loss_weight * get_synops_loss(snn, target_synops)\n",
  "    firing_rate_loss = rate_loss_weight * get_firing_rate_loss(snn, target_firing_rate)\n",
  "    loss = synop_loss + firing_rate_loss\n",
@@ -302,7 +310,7 @@
  "\n",
  "for idx, synops in synops_per_layer.items():\n",
  "    ax.plot(synops, label=f\"Layer {idx}\")\n",
- "ax.axhline(y=target_synops, color='black', label=\"Target synops\")\n",
+ "ax.axhline(y=target_synops, color=\"black\", label=\"Target synops\")\n",
  "ax.set_ylabel(\"Mean synaptic operations per step\")\n",
  "ax.legend()\n",
  "plt.title(\"SynOps during training\");"
--- a/docs/speck/faqs/available_network_arch.md
+++ b/docs/speck/faqs/available_network_arch.md
@@ -39,25 +39,154 @@ dynapcnn.to(devcie="your device", chip_layers_ordering=[2, 5, 7, 1])

  ## What network structure can I define?

- Currently, `sinabs-dynapcnn` can only parse a `torch.nn.Sequential` like architecture. So it is recommended to
- use a `Sequential` like network. We are developing a network graph extraction feature at the present, which will
- help the user to deploy their networks with more complex architecture to the devkit.
+ `Sinabs` can parse a `torch.nn.Sequential` like architecture, so it is recommended to
+ use a `Sequential` like network.

+ As of `v3.1.0`, we released a network graph extraction feature that helps users deploy their networks with more complex architectures into the devkit.
+ Our `Speck` chip, in fact, supports branched architectures. With the graph extraction feature, we support a range of network structures, as shown below:

- ## Can I achieve a "Residual Connection" like ResNet does?

- Like mentioned above, "Yes, we can define a residual short-cut on the devkit". However, currently you can only manually
- change the `samna.speck2f.configuration.CNNLayerDestination.layer` to achieve this, you can do this if you are very
- familiar with the `samna-configuration`. Otherwise,let's wait for a while after the "network graph extraction feature" is
- completed.
+ Two independent networks:
+
+ ![Two independent networks](imgs/two-independent-networks.png)
+
+ Two networks with merging outputs:
+
+ ![Two networks with merging outputs](imgs/two-networks-merging-output.png)
+
+ A network with a merge and a split:

+ ![A network with a merge and a split](imgs/network-with-merge-and-split.png)

- ## What If I Really Want to Use "Residual Connection"!
+
+ Note: with the graph extracture feature it is possible to implement recurrent neural networks. However, this is not recommended or supported as it can result in deadlock on the chip.
+
+ ## How to make use of the graph extraction feature?
+
+ For general architectures, users need to define their classes, by defining at least the `__init__` method with all the layers, as well as an appropriate `forward` method.
+
+ Here is an example to define a network with a merge and a split:
+
+ ```python
+ import torch.nn as nn
+
+ from sinabs.activation.surrogate_gradient_fn import PeriodicExponential
+ from sinabs.layers import IAFSqueeze, Merge, SumPool2d
+
+ class SNN(nn.Module):
+     def __init__(self, batch_size) -> None:
+         super().__init__()
+
+         self.conv1 = nn.Conv2d(2, 1, 2, 1, bias=False)
+         self.iaf1 = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+
+         self.conv2 = nn.Conv2d(1, 1, 2, 1, bias=False)
+         self.iaf2 = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+         self.pool2 = SumPool2d(2, 2)
+
+         self.conv3 = nn.Conv2d(1, 1, 2, 1, bias=False)
+         self.iaf3 = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+         self.pool3 = SumPool2d(2, 2)
+         self.pool3a = SumPool2d(5, 5)
+
+         self.conv4 = nn.Conv2d(1, 1, 2, 1, bias=False)
+         self.iaf4 = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+         self.pool4 = SumPool2d(3, 3)
+
+         self.flat1 = nn.Flatten()
+         self.flat2 = nn.Flatten()
+
+         self.conv5 = nn.Conv2d(1, 1, 2, 1, bias=False)
+         self.iaf5 = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+
+         self.fc2 = nn.Linear(25, 10, bias=False)
+         self.iaf2_fc = IAFSqueeze(
+             batch_size=batch_size,
+             min_v_mem=-1.0,
+             spike_threshold=1.0,
+             surrogate_grad_fn=PeriodicExponential(),
+         )
+
+         # -- merges --
+         self.merge1 = Merge()
+         self.merge2 = Merge()
+
+     def forward(self, x):
+         # conv 1 - A/0
+         con1_out = self.conv1(x)
+         iaf1_out = self.iaf1(con1_out)
+
+         # conv 2 - B/1
+         conv2_out = self.conv2(iaf1_out)
+         iaf2_out = self.iaf2(conv2_out)
+         pool2_out = self.pool2(iaf2_out)
+
+         # conv 3 - C/2
+         conv3_out = self.conv3(iaf1_out)
+         iaf3_out = self.iaf3(conv3_out)
+         pool3_out = self.pool3(iaf3_out)
+         pool3a_out = self.pool3a(iaf3_out)
+
+         # conv 4 - D/3
+         merge1_out = self.merge1(pool2_out, pool3_out)
+         conv4_out = self.conv4(merge1_out)
+         iaf4_out = self.iaf4(conv4_out)
+         pool4_out = self.pool4(iaf4_out)
+         flat1_out = self.flat1(pool4_out)
+
+         # conv 5 - E/4
+         conv5_out = self.conv5(pool3a_out)
+         iaf5_out = self.iaf5(conv5_out)
+         flat2_out = self.flat2(iaf5_out)
+
+         # fc 2 - F/5
+         merge2_out = self.merge2(flat2_out, flat1_out)
+
+         fc2_out = self.fc2(merge2_out)
+         iaf2_fc_out = self.iaf2_fc(fc2_out)
+
+         return iaf2_fc_out
+ ```
+
+ ## Can I achieve a "Residual Connection" like ResNet does?
+
+ Like mentioned above, "Yes, we can define a residual short-cut on the devkit". You can manually
+ change the `samna.speck2f.configuration.CNNLayerDestination.layer` to achieve this, if you are very
+ familiar with the `samna-configuration`.
+ You can also make use of our network graph extraction feature, to implement residual networks.
+
+ ## How to use "Residual Connection" manually?

  Alright! Here I will give an example of achieving the "Residual Connection" by manually modify the `samna-configuration`.

  Let's say you want an architecture like below:

+
  ```python
  from torch import nn
  from sinabs.layers import IAFSqueeze
@@ -92,7 +221,7 @@ class ResidualBlock(nn.Module):

  ```

- Since currently sinabs-dynapcnn can only parse Sequential like network, we need to some tedious work like below:
+ Since currently Sinabs can only parse Sequential like network, we need to do some tedious work like below:

  ```python
  # define a Sequential first
@@ -136,7 +265,7 @@ devkit.get_model().apply_configuration(samna_cfg)
  ```

  I have to say it is not an elegant solution though, it should help you to achieve an initial Residual Block. We will
- improve this part after sinabs-dynapcnn has the ability for extracting model's graph.
+ improve this part after Sinabs has the ability for extracting model's graph.

  ## What execution order should I be aware of when I am implementing a sequential structure?
  You should be aware with the internal layer order.
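Deploying a branched model such as the merge-and-split `SNN` added above follows the same path as a sequential model. A minimal sketch, reusing the `DynapcnnNetwork` arguments that appear verbatim in the leak_neuron notebook diff below; the input shape and device string here are assumptions:

```python
from sinabs.backend.dynapcnn import DynapcnnNetwork

snn = SNN(batch_size=1)  # the merge-and-split model defined above

# Assumed DVS-style input: 2 polarities at 34x34. With this shape both branches
# flatten to 25 features, matching fc2 = nn.Linear(25, 10) in the example.
dynapcnn = DynapcnnNetwork(
    snn=snn, input_shape=(2, 34, 34), discretize=True, dvs_input=False
)
dynapcnn.to(device="speck2fmodule")  # device string as used elsewhere in these docs
```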
--- a/docs/speck/faqs/device_management.md
+++ b/docs/speck/faqs/device_management.md
@@ -54,7 +54,7 @@ import samna
  speck = samna.device.open_device("Speck2fModuleDevKit")
  ```

- All devices that supported by samna can be found at [here.](https://synsense-sys-int.gitlab.io/samna/install.html#discover-supported-devices)
+ All devices that supported by samna can be found at [here.](https://synsense-sys-int.gitlab.io/samna/0.48.0/install.html#supported-devices)

  ## How Do I Reset The Device

--- a/docs/speck/faqs/tips_for_training.md
+++ b/docs/speck/faqs/tips_for_training.md
@@ -104,7 +104,7 @@ for synops_per_layer, limit in zip(synops_of_every_layer, synops_upper_limit):
      synops_loss += torch.nn.functional.relu(residual)
  ```
  4. Switch on the
- ["decimator"](https://synsense-sys-int.gitlab.io/samna/reference/speck2e/configuration/index.html?highlight=decimation#samna.speck2e.configuration.CnnLayerConfig.output_decimator_enable)
+ ["decimator"](https://synsense-sys-int.gitlab.io/samna/0.48.0/reference/speck2e/configuration/index.html#samna.speck2e.configuration.CnnLayerConfig.output_decimator_enable)
  on your DynapCNN Core. In the devkit, each DynapCNN Core is equipped with a decimator block at its data path output.
  The decimator block enables the user to reduce the spike rate at the output of a DynapCNN Core. By default,
  the decimator is disabled, and the code below shows an example of enabling the decimator of the chip by modifying
@@ -126,7 +126,7 @@ samna_cfg.cnn_layers[layer_idx].output_decimator_enable = True
  samna_cfg.cnn_layers[layer_idx].output_decimator_interval = 0b001
  ```

- more details about the "decimator" can be found [here.](https://synsense-sys-int.gitlab.io/samna/reference/speck2f/configuration/index.html?highlight=output_decimator#samna.speck2f.configuration.CnnLayerConfig.output_decimator_interval)
+ more details about the "decimator" can be found [here.](https://synsense-sys-int.gitlab.io/samna/0.48.0/reference/speck2e/configuration/index.html#samna.speck2e.configuration.CnnLayerConfig.output_decimator_interval)

  ## The Reset Mechanism Of Neuron Membrane Potential
  Our devkit provides two types of reset mechanism for the spiking neuron's membrane potential.
@@ -141,7 +141,7 @@ If you use an ANN-to-SNN conversion, then you should choose the second one strat
  If you train an SNN with a "reset to 0" strategy, then you should choose the first one strategy.

  The `samna configuration` for each layer has a boolean attribute called
- ["return_to_zero"](https://synsense-sys-int.gitlab.io/samna/reference/speck2f/configuration/index.html?highlight=return_to_zero#samna.speck2f.configuration.CnnLayerConfig.return_to_zero).
+ ["return_to_zero"](https://synsense-sys-int.gitlab.io/samna/0.48.0/reference/speck2e/configuration/index.html#samna.speck2e.configuration.CnnLayerConfig.return_to_zero).
  - If it is set to be `True`, then the hardware execute the first resetting strategy.
  - If it is set to be `False`, the hardware execute the second strategy.
--- a/docs/speck/notebooks/leak_neuron.ipynb
+++ b/docs/speck/notebooks/leak_neuron.ipynb
@@ -100,9 +100,11 @@
  "    ev = samna.speck2f.event.ReadNeuronValue()\n",
  "    ev.layer = 0\n",
  "    # output feature map size is the same as the input shape\n",
- "    ev.address = calculate_neuron_address(x=x, y=y, c=0, feature_map_size=input_shape)\n",
+ "    ev.address = calculate_neuron_address(\n",
+ "        x=x, y=y, c=0, feature_map_size=input_shape\n",
+ "    )\n",
  "    input_events.append(ev)\n",
- "    \n",
+ "\n",
  "input_events"
  ]
  },
@@ -223,9 +225,11 @@
  "source": [
  "# Deploy the snn to speck devkit\n",
  "\n",
- "dynapcnn = DynapcnnNetwork(snn=snn, discretize=True, dvs_input=False, input_shape=input_shape)\n",
+ "dynapcnn = DynapcnnNetwork(\n",
+ "    snn=snn, discretize=True, dvs_input=False, input_shape=input_shape\n",
+ ")\n",
  "# don't forget to set the slow clock frequency!\n",
- "# here we set the frequency to 1Hz, which mean the Vmem should decrease after every 1 second \n",
+ "# here we set the frequency to 1Hz, which mean the Vmem should decrease after every 1 second\n",
  "dynapcnn.to(device=\"speck2fmodule\", slow_clk_frequency=1)\n",
  "\n",
  "# Check if neuron states decrease along with time pass by\n",
@@ -236,7 +240,7 @@
  "    # write input\n",
  "    dynapcnn.samna_input_buffer.write(input_events)\n",
  "    time.sleep(0.5)\n",
- "    print(f'----After {0.5 * iter_times} seconds:----')\n",
+ "    print(f\"----After {0.5 * iter_times} seconds:----\")\n",
  "    # get outputs\n",
  "    output_events = dynapcnn.samna_output_buffer.get_events()\n",
  "\n",
@@ -244,8 +248,7 @@
  "        c, x, y = neuron_address_to_cxy(out_ev.address, feature_map_size=input_shape)\n",
  "        pre_neuron_state = neuron_states.get((c, x, y), 127)\n",
  "        neuron_states.update({(c, x, y): out_ev.neuron_state})\n",
- "        print(f\"c:{c}, x:{x}, y:{y}, vmem:{out_ev.neuron_state}\")\n",
- "        "
+ "        print(f\"c:{c}, x:{x}, y:{y}, vmem:{out_ev.neuron_state}\")"
  ]
  },
  {