sinabs 3.1.0__tar.gz → 3.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (317) hide show
  1. {sinabs-3.1.0 → sinabs-3.1.1}/PKG-INFO +2 -2
  2. {sinabs-3.1.0 → sinabs-3.1.1}/README.md +1 -1
  3. {sinabs-3.1.0 → sinabs-3.1.1}/docs/about/release_notes.md +11 -4
  4. {sinabs-3.1.0 → sinabs-3.1.1}/docs/conf.py +0 -1
  5. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/utils.py +7 -1
  6. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/nmnist_quick_start.ipynb +142 -48
  7. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/using_readout_layer.ipynb +1 -1
  8. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/io.py +4 -1
  9. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/nir.py +4 -3
  10. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/PKG-INFO +2 -2
  11. sinabs-3.1.1/sinabs.egg-info/pbr.json +1 -0
  12. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/hw_utils.py +7 -4
  13. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_visualizer.py +25 -22
  14. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_nir.py +15 -0
  15. sinabs-3.1.0/sinabs.egg-info/pbr.json +0 -1
  16. {sinabs-3.1.0 → sinabs-3.1.1}/.coveragerc +0 -0
  17. {sinabs-3.1.0 → sinabs-3.1.1}/.github/workflows/ci-pipeline.yml +0 -0
  18. {sinabs-3.1.0 → sinabs-3.1.1}/.pre-commit-config.yaml +0 -0
  19. {sinabs-3.1.0 → sinabs-3.1.1}/.readthedocs.yaml +0 -0
  20. {sinabs-3.1.0 → sinabs-3.1.1}/AUTHORS +0 -0
  21. {sinabs-3.1.0 → sinabs-3.1.1}/CITATION.cff +0 -0
  22. {sinabs-3.1.0 → sinabs-3.1.1}/LICENSE +0 -0
  23. {sinabs-3.1.0 → sinabs-3.1.1}/codecov.yml +0 -0
  24. {sinabs-3.1.0 → sinabs-3.1.1}/docs/Makefile +0 -0
  25. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/dataflow_layers.png +0 -0
  26. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/event_preprocessing_pipeline.png +0 -0
  27. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/memory_constraints.png +0 -0
  28. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/sinabs-dynapcnn-role.png +0 -0
  29. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/speck_dynapcnn.png +0 -0
  30. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/Overview/speck_top_level.png +0 -0
  31. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/devkits_images/dynapcnn_devkit.png +0 -0
  32. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/devkits_images/speck_devkit.png +0 -0
  33. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/devkits_images/speck_module.png +0 -0
  34. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/devkits_images/speck_module_devkit.png +0 -0
  35. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/dvs_input_flow.png +0 -0
  36. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/dynapcnn_visualizer.png +0 -0
  37. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/nmnist_quick_start/spike_input_flow.png +0 -0
  38. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/power_monitoring/dynamic_power_samna_graph.png +0 -0
  39. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/power_monitoring/idle_power_samna_graph.png +0 -0
  40. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/power_monitoring/power_plot.png +0 -0
  41. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/sinabs-logo-lowercase-whitebg.png +0 -0
  42. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/sinabs-logo-lowercase.png +0 -0
  43. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/tips_for_training/exceeding_bandwidth.png +0 -0
  44. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/using_readout_layer/handcraft_weights.png +0 -0
  45. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/using_readout_layer/neuron_id_mismatch.png +0 -0
  46. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/using_readout_layer/readout_layer.png +0 -0
  47. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/using_readout_layer/samna_graph.png +0 -0
  48. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/visualize_speck_dvs/samna_graph.png +0 -0
  49. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/visualize_spike_count/samna_graph.png +0 -0
  50. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_static/visualize_spike_count/spike_count.png +0 -0
  51. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_templates/class_activation.rst +0 -0
  52. {sinabs-3.1.0 → sinabs-3.1.1}/docs/_templates/class_layer.rst +0 -0
  53. {sinabs-3.1.0 → sinabs-3.1.1}/docs/about/about.rst +0 -0
  54. {sinabs-3.1.0 → sinabs-3.1.1}/docs/about/contributing.md +0 -0
  55. {sinabs-3.1.0 → sinabs-3.1.1}/docs/about/differences.md +0 -0
  56. {sinabs-3.1.0 → sinabs-3.1.1}/docs/about/info.md +0 -0
  57. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/activation.rst +0 -0
  58. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/api.rst +0 -0
  59. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/from_torch.rst +0 -0
  60. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/hooks.rst +0 -0
  61. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/layers.rst +0 -0
  62. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/network.rst +0 -0
  63. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/nir.rst +0 -0
  64. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/synopcounter.rst +0 -0
  65. {sinabs-3.1.0 → sinabs-3.1.1}/docs/api/utils.rst +0 -0
  66. {sinabs-3.1.0 → sinabs-3.1.1}/docs/contact.md +0 -0
  67. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/README.rst +0 -0
  68. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/README.rst +0 -0
  69. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/plot_alif.py +0 -0
  70. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/plot_exp_leaky.py +0 -0
  71. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/plot_iaf.py +0 -0
  72. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/layers/plot_lif.py +0 -0
  73. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/spike_fns/README.rst +0 -0
  74. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_maxspike.py +0 -0
  75. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_multispike.py +0 -0
  76. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/spike_fns/plot_singlespike.py +0 -0
  77. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/README.rst +0 -0
  78. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_gaussian.py +0 -0
  79. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_heaviside.py +0 -0
  80. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_multigaussian.py +0 -0
  81. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_periodicexponential.py +0 -0
  82. {sinabs-3.1.0 → sinabs-3.1.1}/docs/gallery/surrogate_grad_fns/plot_singleexponential.py +0 -0
  83. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/fundamentals.rst +0 -0
  84. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/getting_started.rst +0 -0
  85. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/iaf_neuron_model.ipynb +0 -0
  86. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/install.rst +0 -0
  87. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/python_pyenv_pipenv.rst +0 -0
  88. {sinabs-3.1.0 → sinabs-3.1.1}/docs/getting_started/quickstart.ipynb +0 -0
  89. {sinabs-3.1.0 → sinabs-3.1.1}/docs/how_tos/activations.ipynb +0 -0
  90. {sinabs-3.1.0 → sinabs-3.1.1}/docs/how_tos/custom_hooks.ipynb +0 -0
  91. {sinabs-3.1.0 → sinabs-3.1.1}/docs/how_tos/how_tos.rst +0 -0
  92. {sinabs-3.1.0 → sinabs-3.1.1}/docs/how_tos/synops_loss_ann.ipynb +0 -0
  93. {sinabs-3.1.0 → sinabs-3.1.1}/docs/how_tos/synops_loss_snn.ipynb +0 -0
  94. {sinabs-3.1.0 → sinabs-3.1.1}/docs/index.md +0 -0
  95. {sinabs-3.1.0 → sinabs-3.1.1}/docs/make.bat +0 -0
  96. {sinabs-3.1.0 → sinabs-3.1.1}/docs/plugins/plugins.rst +0 -0
  97. {sinabs-3.1.0 → sinabs-3.1.1}/docs/requirements.txt +0 -0
  98. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/advanced_concepts.rst +0 -0
  99. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/chip_factory.rst +0 -0
  100. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/config_builder.rst +0 -0
  101. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/crop2d.rst +0 -0
  102. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/discretize.rst +0 -0
  103. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dvs_layer.rst +0 -0
  104. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn.rst +0 -0
  105. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_layer.rst +0 -0
  106. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_network.rst +0 -0
  107. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/dynapcnn_visualizer.rst +0 -0
  108. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/exceptions.rst +0 -0
  109. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/flipdims.rst +0 -0
  110. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/io.rst +0 -0
  111. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/mapping.rst +0 -0
  112. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/specksim.rst +0 -0
  113. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/api/dynapcnn/utils.rst +0 -0
  114. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/dangers.md +0 -0
  115. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/add_new_device.md +0 -0
  116. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/available_algorithmic_operation.md +0 -0
  117. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/available_network_arch.md +0 -0
  118. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/chip_errata.md +0 -0
  119. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/device_management.md +0 -0
  120. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/imgs/network-with-merge-and-split.png +0 -0
  121. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/imgs/two-independent-networks.png +0 -0
  122. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/imgs/two-networks-merging-output.png +0 -0
  123. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/index.rst +0 -0
  124. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/output_monitoring.md +0 -0
  125. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/save_hardware_config_as_binary.md +0 -0
  126. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/faqs/tips_for_training.md +0 -0
  127. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/index.rst +0 -0
  128. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/leak_neuron.ipynb +0 -0
  129. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/play_with_speck_dvs.ipynb +0 -0
  130. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/power_monitoring.ipynb +0 -0
  131. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/visualize_speck_dvs_input.ipynb +0 -0
  132. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/notebooks/visualize_spike_count.ipynb +0 -0
  133. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/overview.md +0 -0
  134. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/specksim.md +0 -0
  135. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/the_basics.md +0 -0
  136. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/tutorials.rst +0 -0
  137. {sinabs-3.1.0 → sinabs-3.1.1}/docs/speck/visualizer.md +0 -0
  138. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/LeNet_5_EngChinese.ipynb +0 -0
  139. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/bptt.ipynb +0 -0
  140. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/nir_to_speck.ipynb +0 -0
  141. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/nmnist.ipynb +0 -0
  142. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/scnn_mnist.nir +0 -0
  143. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/tutorials.rst +0 -0
  144. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/weight_scaling.md +0 -0
  145. {sinabs-3.1.0 → sinabs-3.1.1}/docs/tutorials/weight_transfer_mnist.ipynb +0 -0
  146. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_DVSLayer_given.ipynb +0 -0
  147. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_DVSLayer_given_followed_by_pool.ipynb +0 -0
  148. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_deployment.ipynb +0 -0
  149. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_need_create_DVSLayer.ipynb +0 -0
  150. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_no_DVSLayer.ipynb +0 -0
  151. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_with_batchnorm.ipynb +0 -0
  152. {sinabs-3.1.0 → sinabs-3.1.1}/examples/dynapcnn_network/snn_with_multiple_batchnorm.ipynb +0 -0
  153. {sinabs-3.1.0 → sinabs-3.1.1}/examples/mnist/dynapcnn_network.py +0 -0
  154. {sinabs-3.1.0 → sinabs-3.1.1}/examples/mnist/mnist_params.pt +0 -0
  155. {sinabs-3.1.0 → sinabs-3.1.1}/examples/mnist/specksim_network.py +0 -0
  156. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/dvs_gesture_params.pt +0 -0
  157. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/gesture_viz.py +0 -0
  158. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/01_armroll.png +0 -0
  159. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/02_handclap.png +0 -0
  160. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/03_lefthandclockwise.png +0 -0
  161. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/04_lefthandcounterclockwise.png +0 -0
  162. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/05_lefthandwave.png +0 -0
  163. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/06_righthandwave.png +0 -0
  164. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/07_righthandclockwise.png +0 -0
  165. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/08_righthandcounterclockwise.png +0 -0
  166. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/09_airdrums.png +0 -0
  167. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/10_airguitar.png +0 -0
  168. {sinabs-3.1.0 → sinabs-3.1.1}/examples/visualizer/icons/11_other.png +0 -0
  169. {sinabs-3.1.0 → sinabs-3.1.1}/jupyterlab-requirements.txt +0 -0
  170. {sinabs-3.1.0 → sinabs-3.1.1}/pull_request_template.md +0 -0
  171. {sinabs-3.1.0 → sinabs-3.1.1}/requirements.txt +0 -0
  172. {sinabs-3.1.0 → sinabs-3.1.1}/setup.cfg +0 -0
  173. {sinabs-3.1.0 → sinabs-3.1.1}/setup.py +0 -0
  174. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/__init__.py +0 -0
  175. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/activation/__init__.py +0 -0
  176. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/activation/quantize.py +0 -0
  177. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/activation/reset_mechanism.py +0 -0
  178. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/activation/spike_generation.py +0 -0
  179. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/activation/surrogate_gradient_fn.py +0 -0
  180. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/__init__.py +0 -0
  181. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/__init__.py +0 -0
  182. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chip_factory.py +0 -0
  183. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/__init__.py +0 -0
  184. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/dynapcnn.py +0 -0
  185. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/speck2e.py +0 -0
  186. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/chips/speck2f.py +0 -0
  187. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/config_builder.py +0 -0
  188. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/connectivity_specs.py +0 -0
  189. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/crop2d.py +0 -0
  190. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/discretize.py +0 -0
  191. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dvs_layer.py +0 -0
  192. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnn_layer.py +0 -0
  193. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +0 -0
  194. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnn_network.py +0 -0
  195. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnn_visualizer.py +0 -0
  196. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +0 -0
  197. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/exceptions.py +0 -0
  198. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/flipdims.py +0 -0
  199. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/mapping.py +0 -0
  200. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/nir_graph_extractor.py +0 -0
  201. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/sinabs_edges_handler.py +0 -0
  202. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/specksim.py +0 -0
  203. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/utils.py +0 -0
  204. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/backend/dynapcnn/weight_rescaling_methods.py +0 -0
  205. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/cnnutils.py +0 -0
  206. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/conversion.py +0 -0
  207. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/from_torch.py +0 -0
  208. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/hooks.py +0 -0
  209. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/__init__.py +0 -0
  210. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/alif.py +0 -0
  211. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/channel_shift.py +0 -0
  212. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/crop2d.py +0 -0
  213. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/exp_leak.py +0 -0
  214. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/functional/__init__.py +0 -0
  215. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/functional/alif.py +0 -0
  216. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/functional/lif.py +0 -0
  217. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/iaf.py +0 -0
  218. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/lif.py +0 -0
  219. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/merge.py +0 -0
  220. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/neuromorphic_relu.py +0 -0
  221. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/pool2d.py +0 -0
  222. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/quantize.py +0 -0
  223. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/reshape.py +0 -0
  224. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/stateful_layer.py +0 -0
  225. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/layers/to_spike.py +0 -0
  226. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/network.py +0 -0
  227. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/synopcounter.py +0 -0
  228. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/utils.py +0 -0
  229. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs/validate_memory_speck.py +0 -0
  230. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/SOURCES.txt +0 -0
  231. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/dependency_links.txt +0 -0
  232. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/not-zip-safe +0 -0
  233. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/requires.txt +0 -0
  234. {sinabs-3.1.0 → sinabs-3.1.1}/sinabs.egg-info/top_level.txt +0 -0
  235. {sinabs-3.1.0 → sinabs-3.1.1}/tests/__init__.py +0 -0
  236. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/conv_input.pth +0 -0
  237. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/conv_layer_synops.pth +0 -0
  238. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/firing_rates.pth +0 -0
  239. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/firing_rates_per_neuron.pth +0 -0
  240. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/input_diffs.pth +0 -0
  241. {sinabs-3.1.0 → sinabs-3.1.1}/tests/inputs_and_results/hooks/model_synops.pth +0 -0
  242. {sinabs-3.1.0 → sinabs-3.1.1}/tests/mnist_params.pt +0 -0
  243. {sinabs-3.1.0 → sinabs-3.1.1}/tests/models/README.txt +0 -0
  244. {sinabs-3.1.0 → sinabs-3.1.1}/tests/models/synop_hook_model.pth +0 -0
  245. {sinabs-3.1.0 → sinabs-3.1.1}/tests/requirements.txt +0 -0
  246. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_activations.py +0 -0
  247. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_batch_mismatch.py +0 -0
  248. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_batch_size_update.py +0 -0
  249. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_conversion.py +0 -0
  250. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_copy.py +0 -0
  251. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/custom_jit_filters.py +0 -0
  252. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_auto_mapping.py +0 -0
  253. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_compatible_layer_build.py +0 -0
  254. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_config_making.py +0 -0
  255. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_device_movement.py +0 -0
  256. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_device_name_mapping.py +0 -0
  257. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_discover_device.py +0 -0
  258. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_discretized.py +0 -0
  259. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_doorbell.py +0 -0
  260. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_dvs_input.py +0 -0
  261. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_dvs_layer.py +0 -0
  262. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_event_conversion.py +0 -0
  263. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_individual_cases.py +0 -0
  264. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_large_net.py +0 -0
  265. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_learning.py +0 -0
  266. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_monitoring.py +0 -0
  267. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_neuron_leak.py +0 -0
  268. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_single_neuron_hardware.py +0 -0
  269. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnn/test_speck2e.py +0 -0
  270. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/__init__.py +0 -0
  271. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/conftest_dynapcnnlayer.py +0 -0
  272. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/model_dummy_1.py +0 -0
  273. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/model_dummy_2.py +0 -0
  274. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/model_dummy_3.py +0 -0
  275. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/model_dummy_4.py +0 -0
  276. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnlayer/test_dynapcnnlayer.py +0 -0
  277. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/__init__.py +0 -0
  278. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py +0 -0
  279. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/model_dummy_1.py +0 -0
  280. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/model_dummy_2.py +0 -0
  281. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/model_dummy_3.py +0 -0
  282. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/model_dummy_4.py +0 -0
  283. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/model_dummy_seq.py +0 -0
  284. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py +0 -0
  285. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_dynapcnnnetwork/test_failcases.py +0 -0
  286. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_from_model.py +0 -0
  287. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/conftest_graph_extractor.py +0 -0
  288. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/model_dummy_1.py +0 -0
  289. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/model_dummy_2.py +0 -0
  290. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/model_dummy_3.py +0 -0
  291. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/model_dummy_4.py +0 -0
  292. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_graph_extractor/test_graph_extractor.py +0 -0
  293. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_hooks.py +0 -0
  294. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_alif.py +0 -0
  295. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_channelshift.py +0 -0
  296. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_crop2d.py +0 -0
  297. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_exp_leak.py +0 -0
  298. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_iaf.py +0 -0
  299. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_img2spk.py +0 -0
  300. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_lif.py +0 -0
  301. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_maxpooling.py +0 -0
  302. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_merge.py +0 -0
  303. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_neuromorphic_relu.py +0 -0
  304. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_reshaping.py +0 -0
  305. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_sig2spk.py +0 -0
  306. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_layers/test_stateful_layer.py +0 -0
  307. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_network_class.py +0 -0
  308. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_normalize_weights.py +0 -0
  309. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_onnx.py +0 -0
  310. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_quantize.py +0 -0
  311. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_specksim/test_specksim_bindings.py +0 -0
  312. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_specksim/test_specksim_conversion.py +0 -0
  313. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_specksim/test_specksim_network.py +0 -0
  314. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_surrogate_gradients.py +0 -0
  315. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_synops_counter.py +0 -0
  316. {sinabs-3.1.0 → sinabs-3.1.1}/tests/test_utils.py +0 -0
  317. {sinabs-3.1.0 → sinabs-3.1.1}/tests/weights/README.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sinabs
3
- Version: 3.1.0
3
+ Version: 3.1.1
4
4
  Summary: SynSense Spiking Neural Network simulator for deep neural networks (DNNs).
5
5
  Author: SynSense (formerly AiCTX)
6
6
  Author-email: support@synsense.ai
@@ -87,7 +87,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.
87
87
 
88
88
  Contributing to Sinabs
89
89
  ------------------------
90
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
90
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.
91
91
 
92
92
 
93
93
  Citation
@@ -49,7 +49,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.
49
49
 
50
50
  Contributing to Sinabs
51
51
  ------------------------
52
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
52
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.
53
53
 
54
54
 
55
55
  Citation
@@ -1,15 +1,22 @@
1
1
  # Release notes
2
2
 
3
+ ## Unreleased
4
+
5
+ * Fix NIR export of Conv1d layer which expected an input_shape parameter.
6
+ * Fix broken link in documentation.
7
+ * Fix missing plots on `Neuron Models` gallery.
8
+ * Fix `open_device` function that would throw an error if an unsupported SynSense device was connected to the computer.
9
+
3
10
  ## v3.1.0 (31/10/2025)
4
11
 
5
- * Fix tests crashing in case an unexpected board was found.
6
- * Fix tests that were not running for speck2fdevkit.
7
- * Fix error when initializing sinabs without matplotlib: matplotlib is not a sinabs dependency.
8
- * Add mapping of non-sequential networks
12
+ * Add mapping of non-sequential networks:
9
13
  * Now, layers in Sinabs models can receive inputs from and send outputs to multiple layers.
10
14
  * Deprecate `chip_layers_ordering` from DynapCNNNetwork. Use `layer2core_map` instead.
11
15
  * Deprecate `DynapcnnCompatibleNetwork`. Use `DynapcnnNetwork` instead.
12
16
  * Deprecate `merge_conv_bn` from `sinabs.utils`. Use `merge_bn` instead.
17
+ * Fix tests crashing in case an unexpected board was found.
18
+ * Fix tests that were not running for speck2fdevkit.
19
+ * Fix error when initializing sinabs without matplotlib: matplotlib is not a sinabs dependency.
13
20
 
14
21
  ## v3.0.4 (08/09/2025)
15
22
 
@@ -31,7 +31,6 @@ sphinx_gallery_conf = {
31
31
  # "backreferences_dir": "gen_modules/backreferences",
32
32
  "doc_module": ("sinabs",),
33
33
  "download_all_examples": False,
34
- "ignore_pattern": r"utils\.py",
35
34
  }
36
35
 
37
36
  # Napoleon settings
@@ -1,8 +1,14 @@
1
+ """
2
+ ==============================
3
+ Utility Function
4
+ ==============================
5
+ Utility function for plots in the documentation.
6
+ """
7
+
1
8
  import matplotlib.pyplot as plt
2
9
  import torch
3
10
 
4
11
  import sinabs
5
- import sinabs.layers as sl
6
12
 
7
13
 
8
14
  def plot_evolution(neuron_model: sinabs.layers, input: torch.Tensor):
@@ -37,7 +37,7 @@
37
37
  "except ImportError:\n",
38
38
  " ! pip install tonic\n",
39
39
  " from tonic.datasets.nmnist import NMNIST\n",
40
- " \n",
40
+ "\n",
41
41
  "# download dataset\n",
42
42
  "root_dir = \"./NMNIST\"\n",
43
43
  "_ = NMNIST(save_to=root_dir, train=True)\n",
@@ -64,7 +64,9 @@
64
64
  "sample_data, label = NMNIST(save_to=root_dir, train=False)[0]\n",
65
65
  "\n",
66
66
  "print(f\"type of data is: {type(sample_data)}\")\n",
67
- "print(f\"time length of sample data is: {sample_data['t'][-1] - sample_data['t'][0]} micro seconds\")\n",
67
+ "print(\n",
68
+ " f\"time length of sample data is: {sample_data['t'][-1] - sample_data['t'][0]} micro seconds\"\n",
69
+ ")\n",
68
70
  "print(f\"there are {len(sample_data)} events in the sample data\")\n",
69
71
  "print(f\"the label of the sample data is: {label}\")"
70
72
  ]
@@ -99,15 +101,26 @@
99
101
  "# define a CNN model\n",
100
102
  "cnn = nn.Sequential(\n",
101
103
  " # [2, 34, 34] -> [8, 17, 17]\n",
102
- " nn.Conv2d(in_channels=2, out_channels=8, kernel_size=(3, 3), padding=(1, 1), bias=False),\n",
104
+ " nn.Conv2d(\n",
105
+ " in_channels=2, out_channels=8, kernel_size=(3, 3), padding=(1, 1), bias=False\n",
106
+ " ),\n",
103
107
  " nn.ReLU(),\n",
104
108
  " nn.AvgPool2d(2, 2),\n",
105
109
  " # [8, 17, 17] -> [16, 8, 8]\n",
106
- " nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), padding=(1, 1), bias=False),\n",
110
+ " nn.Conv2d(\n",
111
+ " in_channels=8, out_channels=16, kernel_size=(3, 3), padding=(1, 1), bias=False\n",
112
+ " ),\n",
107
113
  " nn.ReLU(),\n",
108
114
  " nn.AvgPool2d(2, 2),\n",
109
115
  " # [16 * 8 * 8] -> [16, 4, 4]\n",
110
- " nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=(1, 1), stride=(2, 2), bias=False),\n",
116
+ " nn.Conv2d(\n",
117
+ " in_channels=16,\n",
118
+ " out_channels=16,\n",
119
+ " kernel_size=(3, 3),\n",
120
+ " padding=(1, 1),\n",
121
+ " stride=(2, 2),\n",
122
+ " bias=False,\n",
123
+ " ),\n",
111
124
  " nn.ReLU(),\n",
112
125
  " # [16 * 4 * 4] -> [10]\n",
113
126
  " nn.Flatten(),\n",
@@ -160,7 +173,9 @@
160
173
  "\n",
161
174
  "# check the transformed data\n",
162
175
  "sample_data, label = cnn_train_dataset[0]\n",
163
- "print(f\"The transformed array is in shape [Time-Step, Channel, Height, Width] --> {sample_data.shape}\")"
176
+ "print(\n",
177
+ " f\"The transformed array is in shape [Time-Step, Channel, Height, Width] --> {sample_data.shape}\"\n",
178
+ ")"
164
179
  ]
165
180
  },
166
181
  {
@@ -306,8 +321,20 @@
306
321
  "\n",
307
322
  "cnn = cnn.to(device=device)\n",
308
323
  "\n",
309
- "cnn_train_dataloader = DataLoader(cnn_train_dataset, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=shuffle)\n",
310
- "cnn_test_dataloader = DataLoader(cnn_test_dataset, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=shuffle)\n",
324
+ "cnn_train_dataloader = DataLoader(\n",
325
+ " cnn_train_dataset,\n",
326
+ " batch_size=batch_size,\n",
327
+ " num_workers=num_workers,\n",
328
+ " drop_last=True,\n",
329
+ " shuffle=shuffle,\n",
330
+ ")\n",
331
+ "cnn_test_dataloader = DataLoader(\n",
332
+ " cnn_test_dataset,\n",
333
+ " batch_size=batch_size,\n",
334
+ " num_workers=num_workers,\n",
335
+ " drop_last=True,\n",
336
+ " shuffle=shuffle,\n",
337
+ ")\n",
311
338
  "\n",
312
339
  "optimizer = SGD(params=cnn.parameters(), lr=lr)\n",
313
340
  "criterion = CrossEntropyLoss()\n",
@@ -329,7 +356,9 @@
329
356
  " loss.backward()\n",
330
357
  " optimizer.step()\n",
331
358
  " # set progressing bar\n",
332
- " train_p_bar.set_description(f\"Epoch {e} - Training Loss: {round(loss.item(), 4)}\")\n",
359
+ " train_p_bar.set_description(\n",
360
+ " f\"Epoch {e} - Training Loss: {round(loss.item(), 4)}\"\n",
361
+ " )\n",
333
362
  "\n",
334
363
  " # validate\n",
335
364
  " correct_predictions = []\n",
@@ -348,9 +377,11 @@
348
377
  " correct_predictions.append(pred.eq(label.view_as(pred)))\n",
349
378
  " # set progressing bar\n",
350
379
  " test_p_bar.set_description(f\"Epoch {e} - Testing Model...\")\n",
351
- " \n",
380
+ "\n",
352
381
  " correct_predictions = torch.cat(correct_predictions)\n",
353
- " print(f\"Epoch {e} - accuracy: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\")"
382
+ " print(\n",
383
+ " f\"Epoch {e} - accuracy: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\"\n",
384
+ " )"
354
385
  ]
355
386
  },
356
387
  {
@@ -391,7 +422,9 @@
391
422
  "source": [
392
423
  "from sinabs.from_torch import from_model\n",
393
424
  "\n",
394
- "snn_convert = from_model(model=cnn, input_shape=(2, 34, 34), batch_size=batch_size).spiking_model\n",
425
+ "snn_convert = from_model(\n",
426
+ " model=cnn, input_shape=(2, 34, 34), batch_size=batch_size\n",
427
+ ").spiking_model\n",
395
428
  "snn_convert"
396
429
  ]
397
430
  },
@@ -434,7 +467,13 @@
434
467
  "n_time_steps = 100\n",
435
468
  "to_raster = ToFrame(sensor_size=NMNIST.sensor_size, n_time_bins=n_time_steps)\n",
436
469
  "snn_test_dataset = NMNIST(save_to=root_dir, train=False, transform=to_raster)\n",
437
- "snn_test_dataloader = DataLoader(snn_test_dataset, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=False)\n",
470
+ "snn_test_dataloader = DataLoader(\n",
471
+ " snn_test_dataset,\n",
472
+ " batch_size=batch_size,\n",
473
+ " num_workers=num_workers,\n",
474
+ " drop_last=True,\n",
475
+ " shuffle=False,\n",
476
+ ")\n",
438
477
  "\n",
439
478
  "snn_convert = snn_convert.to(device)\n",
440
479
  "\n",
@@ -459,7 +498,9 @@
459
498
  " test_p_bar.set_description(f\"Testing SNN Model...\")\n",
460
499
  "\n",
461
500
  " correct_predictions = torch.cat(correct_predictions)\n",
462
- " print(f\"accuracy of converted SNN: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\")"
501
+ " print(\n",
502
+ " f\"accuracy of converted SNN: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\"\n",
503
+ " )"
463
504
  ]
464
505
  },
465
506
  {
@@ -511,20 +552,39 @@
511
552
  "# just replace the ReLU layer with the sl.IAFSqueeze\n",
512
553
  "snn_bptt = nn.Sequential(\n",
513
554
  " # [2, 34, 34] -> [8, 17, 17]\n",
514
- " nn.Conv2d(in_channels=2, out_channels=8, kernel_size=(3, 3), padding=(1, 1), bias=False),\n",
515
- " sl.IAFSqueeze(batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()),\n",
555
+ " nn.Conv2d(\n",
556
+ " in_channels=2, out_channels=8, kernel_size=(3, 3), padding=(1, 1), bias=False\n",
557
+ " ),\n",
558
+ " sl.IAFSqueeze(\n",
559
+ " batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()\n",
560
+ " ),\n",
516
561
  " nn.AvgPool2d(2, 2),\n",
517
562
  " # [8, 17, 17] -> [16, 8, 8]\n",
518
- " nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), padding=(1, 1), bias=False),\n",
519
- " sl.IAFSqueeze(batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()),\n",
563
+ " nn.Conv2d(\n",
564
+ " in_channels=8, out_channels=16, kernel_size=(3, 3), padding=(1, 1), bias=False\n",
565
+ " ),\n",
566
+ " sl.IAFSqueeze(\n",
567
+ " batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()\n",
568
+ " ),\n",
520
569
  " nn.AvgPool2d(2, 2),\n",
521
570
  " # [16 * 8 * 8] -> [16, 4, 4]\n",
522
- " nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=(1, 1), stride=(2, 2), bias=False),\n",
523
- " sl.IAFSqueeze(batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()),\n",
571
+ " nn.Conv2d(\n",
572
+ " in_channels=16,\n",
573
+ " out_channels=16,\n",
574
+ " kernel_size=(3, 3),\n",
575
+ " padding=(1, 1),\n",
576
+ " stride=(2, 2),\n",
577
+ " bias=False,\n",
578
+ " ),\n",
579
+ " sl.IAFSqueeze(\n",
580
+ " batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()\n",
581
+ " ),\n",
524
582
  " # [16 * 4 * 4] -> [10]\n",
525
583
  " nn.Flatten(),\n",
526
584
  " nn.Linear(16 * 4 * 4, 10, bias=False),\n",
527
- " sl.IAFSqueeze(batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()),\n",
585
+ " sl.IAFSqueeze(\n",
586
+ " batch_size=batch_size, min_v_mem=-1.0, surrogate_grad_fn=PeriodicExponential()\n",
587
+ " ),\n",
528
588
  ")\n",
529
589
  "\n",
530
590
  "# init the model weights\n",
@@ -585,6 +645,7 @@
585
645
  "source": [
586
646
  "try:\n",
587
647
  " from sinabs.exodus import conversion\n",
648
+ "\n",
588
649
  " snn_bptt = conversion.sinabs_to_exodus(snn_bptt)\n",
589
650
  "except ImportError:\n",
590
651
  " print(\"Sinabs-exodus is not intalled.\")\n",
@@ -668,8 +729,20 @@
668
729
  "device = \"cuda:0\"\n",
669
730
  "shuffle = True\n",
670
731
  "\n",
671
- "snn_train_dataloader = DataLoader(snn_train_dataset, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=True)\n",
672
- "snn_test_dataloader = DataLoader(snn_test_dataset, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=False)\n",
732
+ "snn_train_dataloader = DataLoader(\n",
733
+ " snn_train_dataset,\n",
734
+ " batch_size=batch_size,\n",
735
+ " num_workers=num_workers,\n",
736
+ " drop_last=True,\n",
737
+ " shuffle=True,\n",
738
+ ")\n",
739
+ "snn_test_dataloader = DataLoader(\n",
740
+ " snn_test_dataset,\n",
741
+ " batch_size=batch_size,\n",
742
+ " num_workers=num_workers,\n",
743
+ " drop_last=True,\n",
744
+ " shuffle=False,\n",
745
+ ")\n",
673
746
  "\n",
674
747
  "snn_bptt = snn_bptt.to(device=device)\n",
675
748
  "\n",
@@ -695,15 +768,17 @@
695
768
  " # backward\n",
696
769
  " loss.backward()\n",
697
770
  " optimizer.step()\n",
698
- " \n",
771
+ "\n",
699
772
  " # detach the neuron states and activations from current computation graph(necessary)\n",
700
773
  " for layer in snn_bptt.modules():\n",
701
774
  " if isinstance(layer, sl.StatefulLayer):\n",
702
775
  " for name, buffer in layer.named_buffers():\n",
703
776
  " buffer.detach_()\n",
704
- " \n",
777
+ "\n",
705
778
  " # set progressing bar\n",
706
- " train_p_bar.set_description(f\"Epoch {e} - BPTT Training Loss: {round(loss.item(), 4)}\")\n",
779
+ " train_p_bar.set_description(\n",
780
+ " f\"Epoch {e} - BPTT Training Loss: {round(loss.item(), 4)}\"\n",
781
+ " )\n",
707
782
  "\n",
708
783
  " # validate\n",
709
784
  " correct_predictions = []\n",
@@ -725,9 +800,11 @@
725
800
  " correct_predictions.append(pred.eq(label.view_as(pred)))\n",
726
801
  " # set progressing bar\n",
727
802
  " test_p_bar.set_description(f\"Epoch {e} - BPTT Testing Model...\")\n",
728
- " \n",
803
+ "\n",
729
804
  " correct_predictions = torch.cat(correct_predictions)\n",
730
- " print(f\"Epoch {e} - BPTT accuracy: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\")"
805
+ " print(\n",
806
+ " f\"Epoch {e} - BPTT accuracy: {correct_predictions.sum().item()/(len(correct_predictions))*100}%\"\n",
807
+ " )"
731
808
  ]
732
809
  },
733
810
  {
@@ -775,6 +852,7 @@
775
852
  "source": [
776
853
  "try:\n",
777
854
  " from sinabs.exodus import conversion\n",
855
+ "\n",
778
856
  " snn_bptt = conversion.exodus_to_sinabs(snn_bptt)\n",
779
857
  "except ImportError:\n",
780
858
  " print(\"Sinabs-exodus is not intalled.\")\n",
@@ -812,7 +890,9 @@
812
890
  "\n",
813
891
  "# cpu_snn = snn_convert.to(device=\"cpu\")\n",
814
892
  "cpu_snn = snn_bptt.to(device=\"cpu\")\n",
815
- "dynapcnn = DynapcnnNetwork(snn=cpu_snn, input_shape=(2, 34, 34), discretize=True, dvs_input=False)\n",
893
+ "dynapcnn = DynapcnnNetwork(\n",
894
+ " snn=cpu_snn, input_shape=(2, 34, 34), discretize=True, dvs_input=False\n",
895
+ ")\n",
816
896
  "devkit_name = \"speck2fdevkit\"\n",
817
897
  "\n",
818
898
  "# use the `to` method of DynapcnnNetwork to deploy the SNN to the devkit\n",
@@ -886,10 +966,10 @@
886
966
  " samna_event_stream = []\n",
887
967
  " for ev in events:\n",
888
968
  " spk = samna.speck2f.event.Spike()\n",
889
- " spk.x = ev['x']\n",
890
- " spk.y = ev['y']\n",
891
- " spk.timestamp = ev['t'] - events['t'][0]\n",
892
- " spk.feature = ev['p']\n",
969
+ " spk.x = ev[\"x\"]\n",
970
+ " spk.y = ev[\"y\"]\n",
971
+ " spk.timestamp = ev[\"t\"] - events[\"t\"][0]\n",
972
+ " spk.feature = ev[\"p\"]\n",
893
973
  " # Spikes will be sent to layer/core #0, since the SNN is deployed on core: [0, 1, 2, 3]\n",
894
974
  " spk.layer = 0\n",
895
975
  " samna_event_stream.append(spk)\n",
@@ -897,7 +977,7 @@
897
977
  " # inference on chip\n",
898
978
  " # output_events is also a list of Spike, but each Spike.layer is 3, since layer#3 is the output layer\n",
899
979
  " output_events = dynapcnn(samna_event_stream)\n",
900
- " \n",
980
+ "\n",
901
981
  " # use the most frequent output neruon index as the final prediction\n",
902
982
  " neuron_index = [each.feature for each in output_events]\n",
903
983
  " if len(neuron_index) != 0:\n",
@@ -905,14 +985,16 @@
905
985
  " prediction = frequent_counter.most_common(1)[0][0]\n",
906
986
  " else:\n",
907
987
  " prediction = -1\n",
908
- " inferece_p_bar.set_description(f\"label: {label}, prediction: {prediction}, output spikes num: {len(output_events)}\") \n",
988
+ " inferece_p_bar.set_description(\n",
989
+ " f\"label: {label}, prediction: {prediction}, output spikes num: {len(output_events)}\"\n",
990
+ " )\n",
909
991
  "\n",
910
992
  " if prediction == label:\n",
911
993
  " correct_samples += 1\n",
912
994
  "\n",
913
995
  " test_samples += 1\n",
914
- " \n",
915
- "print(f\"On chip inference accuracy: {correct_samples / test_samples}\") "
996
+ "\n",
997
+ "print(f\"On chip inference accuracy: {correct_samples / test_samples}\")"
916
998
  ]
917
999
  },
918
1000
  {
@@ -1004,6 +1086,8 @@
1004
1086
  ],
1005
1087
  "source": [
1006
1088
  "import samnawe\n",
1089
+ "\n",
1090
+ "\n",
1007
1091
  "# first define a callback function to modify the devkit configuration\n",
1008
1092
  "# the callback function should only has 1 devkit config instance as its input argument\n",
1009
1093
  "def config_modify_callback(devkit_cfg):\n",
@@ -1014,24 +1098,32 @@
1014
1098
  " devkit_cfg.dvs_layer.raw_monitor_enable = False\n",
1015
1099
  " # prevent the events generated by the embedded dvs been feed to the DynapCNN Core.\n",
1016
1100
  " devkit_cfg.dvs_layer.pass_sensor_events = False\n",
1017
- " # point the dvs layer output destination to the core#0 \n",
1101
+ " # point the dvs layer output destination to the core#0\n",
1018
1102
  " devkit_cfg.dvs_layer.destinations[0].enable = True\n",
1019
1103
  " devkit_cfg.dvs_layer.destinations[0].layer = 0\n",
1020
1104
  "\n",
1021
1105
  " # the callback must return the modified devkit config\n",
1022
1106
  " return devkit_cfg\n",
1023
1107
  "\n",
1108
+ "\n",
1024
1109
  "# close the devkit before reopen\n",
1025
1110
  "samna.device.close_device(dynapcnn.samna_device)\n",
1026
1111
  "\n",
1027
1112
  "# init DynapcnnNetwork instance\n",
1028
- "dynapcnn = DynapcnnNetwork(snn=cpu_snn, input_shape=(2, 34, 34), discretize=True, dvs_input=True)\n",
1113
+ "dynapcnn = DynapcnnNetwork(\n",
1114
+ " snn=cpu_snn, input_shape=(2, 34, 34), discretize=True, dvs_input=True\n",
1115
+ ")\n",
1029
1116
  "\n",
1030
1117
  "devkit_name = \"speck2fdevkit\"\n",
1031
1118
  "# define which layers output you want to monitor\n",
1032
1119
  "layers_to_monitor = [0, 1, 2, 3]\n",
1033
1120
  "# pass the callback function into the `.to` method\n",
1034
- "dynapcnn.to(device=devkit_name, chip_layers_ordering=[0, 1, 2, 3], monitor_layers=layers_to_monitor, config_modifier=config_modify_callback)\n",
1121
+ "dynapcnn.to(\n",
1122
+ " device=devkit_name,\n",
1123
+ " chip_layers_ordering=[0, 1, 2, 3],\n",
1124
+ " monitor_layers=layers_to_monitor,\n",
1125
+ " config_modifier=config_modify_callback,\n",
1126
+ ")\n",
1035
1127
  "print(f\"The SNN is deployed on the core: {dynapcnn.chip_layers_ordering}\")"
1036
1128
  ]
1037
1129
  },
@@ -1160,16 +1252,16 @@
1160
1252
  " samna_event_stream = []\n",
1161
1253
  " for ev in events:\n",
1162
1254
  " dvs_ev = samna.speck2f.event.DvsEvent()\n",
1163
- " dvs_ev.x = ev['x']\n",
1164
- " dvs_ev.y = ev['y']\n",
1165
- " dvs_ev.timestamp = ev['t'] - events['t'][0]\n",
1166
- " dvs_ev.p = ev['p']\n",
1255
+ " dvs_ev.x = ev[\"x\"]\n",
1256
+ " dvs_ev.y = ev[\"y\"]\n",
1257
+ " dvs_ev.timestamp = ev[\"t\"] - events[\"t\"][0]\n",
1258
+ " dvs_ev.p = ev[\"p\"]\n",
1167
1259
  " samna_event_stream.append(dvs_ev)\n",
1168
1260
  "\n",
1169
1261
  " # inference on chip\n",
1170
1262
  " # output_events is also a list of Spike, but .layer will have 0, 1, 2, 3 since we choose to monitor all layers' output\n",
1171
1263
  " output_events = dynapcnn(samna_event_stream)\n",
1172
- " \n",
1264
+ "\n",
1173
1265
  " # get each layers output spikes\n",
1174
1266
  " layer0_spks = [each.feature for each in output_events if each.layer == 0]\n",
1175
1267
  " layer1_spks = [each.feature for each in output_events if each.layer == 1]\n",
@@ -1181,14 +1273,16 @@
1181
1273
  " prediction = frequent_counter.most_common(1)[0][0]\n",
1182
1274
  " else:\n",
1183
1275
  " prediction = -1\n",
1184
- " inferece_p_bar.set_description(f\"label: {label} prediction: {prediction},layer 0 output spks: {len(layer0_spks)},layer 1 output spikes num: {len(layer1_spks)}, layer 2 output spikes num: {len(layer2_spks)},layer 3 output spikes num: {len(layer3_spks)}\") \n",
1276
+ " inferece_p_bar.set_description(\n",
1277
+ " f\"label: {label} prediction: {prediction},layer 0 output spks: {len(layer0_spks)},layer 1 output spikes num: {len(layer1_spks)}, layer 2 output spikes num: {len(layer2_spks)},layer 3 output spikes num: {len(layer3_spks)}\"\n",
1278
+ " )\n",
1185
1279
  "\n",
1186
1280
  " if prediction == label:\n",
1187
1281
  " correct_samples += 1\n",
1188
1282
  "\n",
1189
1283
  " test_samples += 1\n",
1190
- " \n",
1191
- "print(f\"On chip inference accuracy: {correct_samples / test_samples}\") "
1284
+ "\n",
1285
+ "print(f\"On chip inference accuracy: {correct_samples / test_samples}\")"
1192
1286
  ]
1193
1287
  },
1194
1288
  {
@@ -190,7 +190,7 @@
190
190
  "outputs": [],
191
191
  "source": [
192
192
  "def remapping_output_index(\n",
193
- " output_layer: Union[nn.Conv2d, nn.Linear]\n",
193
+ " output_layer: Union[nn.Conv2d, nn.Linear],\n",
194
194
  ") -> Union[nn.Conv2d, nn.Linear]:\n",
195
195
  " \"\"\"\n",
196
196
  " Since the mapping of output channel's index from last cnn layer to the readout layer is not correct\n",
@@ -133,8 +133,11 @@ def get_device_map() -> Dict:
133
133
  # Group by device_type_name
134
134
  device_groups = groupby(devices, lambda x: x.device_type_name)
135
135
  # Switch keys from samna's device_type_name to device_type names
136
+ # -- guarantee is a supported device
136
137
  device_groups = {
137
- device_type_map[k]: sort_devices(list(v)) for k, v in device_groups
138
+ device_type_map[k]: sort_devices(list(v))
139
+ for k, v in device_groups
140
+ if k in device_type_map
138
141
  }
139
142
  # Flat map
140
143
  for dev_type, dev_list in device_groups.items():
@@ -46,8 +46,8 @@ def _import_sinabs_module(
46
46
  groups=node.groups,
47
47
  bias=True,
48
48
  )
49
- conv.weight.data = torch.tensor(node.weight).float()
50
- conv.bias.data = torch.tensor(node.bias).float()
49
+ conv.weight.data = node.weight.detach().clone().to(float)
50
+ conv.bias.data = node.bias.detach().clone().to(float)
51
51
  return conv
52
52
 
53
53
  elif isinstance(node, nir.Conv2d):
@@ -184,6 +184,7 @@ def _extract_sinabs_module(module: torch.nn.Module) -> Optional[nir.NIRNode]:
184
184
  return nir.Affine(module.weight.detach(), module.bias.detach())
185
185
  elif isinstance(module, torch.nn.Conv1d):
186
186
  return nir.Conv1d(
187
+ input_shape=None,
187
188
  weight=module.weight.detach(),
188
189
  stride=module.stride,
189
190
  padding=module.padding,
@@ -191,7 +192,7 @@ def _extract_sinabs_module(module: torch.nn.Module) -> Optional[nir.NIRNode]:
191
192
  groups=module.groups,
192
193
  bias=(
193
194
  module.bias.detach()
194
- if module.bias
195
+ if isinstance(module.bias, torch.Tensor)
195
196
  else torch.zeros((module.weight.shape[0]))
196
197
  ),
197
198
  )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sinabs
3
- Version: 3.1.0
3
+ Version: 3.1.1
4
4
  Summary: SynSense Spiking Neural Network simulator for deep neural networks (DNNs).
5
5
  Author: SynSense (formerly AiCTX)
6
6
  Author-email: support@synsense.ai
@@ -87,7 +87,7 @@ Sinabs is published under Apache v2.0. See the LICENSE file for details.
87
87
 
88
88
  Contributing to Sinabs
89
89
  ------------------------
90
- Checkout the [contributing](https://sinabs.readthedocs.io/en/develop/about/contributing.html) page for more info.
90
+ Checkout the [contributing](https://sinabs.readthedocs.io/develop/about/contributing.html) page for more info.
91
91
 
92
92
 
93
93
  Citation
@@ -0,0 +1 @@
1
+ {"git_version": "cf8590b", "is_release": false}
@@ -62,9 +62,12 @@ def reset_all_connected_boards():
62
62
  import samna
63
63
 
64
64
  devs = samna.device.get_unopened_devices()
65
+ reverse_dict = {v: k for k, v in supported_device_types_for_testing.items()}
66
+
65
67
  if len(devs) > 0: # check if the connected board is found.
66
68
  for device in devs:
67
- handle = samna.device.open_device(device)
68
- handle.reset_board_soft(True)
69
- samna.device.close_device(handle)
70
- print(f"Resetted board: {device.device_type_name}")
69
+ if device.device_type_name in reverse_dict:
70
+ handle = samna.device.open_device(device)
71
+ handle.reset_board_soft(True)
72
+ samna.device.close_device(handle)
73
+ print(f"Resetted board: {device.device_type_name}")
@@ -62,9 +62,6 @@ def get_demo_dynapcnn_network():
62
62
  return dynapcnn_network
63
63
 
64
64
 
65
- @pytest.mark.skip(
66
- "This test was being skiiped all the time and now needs modification in the visualizer"
67
- )
68
65
  def test_jit_compilation():
69
66
  dvs_shape = (128, 128)
70
67
  spike_collection_interval = 500
@@ -76,22 +73,28 @@ def test_jit_compilation():
76
73
 
77
74
  dynapcnn_network = get_demo_dynapcnn_network()
78
75
  for device_name, _ in devices.items():
79
- dynapcnn_network.to(device=device_name)
80
-
81
- visualizer = DynapcnnVisualizer(
82
- dvs_shape=dvs_shape, spike_collection_interval=spike_collection_interval
83
- )
84
- visualizer.create_visualizer_process(visualizer_id=visualizer_id)
85
-
86
- streamer_graph = samna.graph.EventFilterGraph()
87
- # Streamer graph
88
- # Dvs node
89
- (_, dvs_member_filter, _, streamer_node) = streamer_graph.sequential(
90
- [
91
- # samna.graph.JitSource(samna.speck2e.event.OutputEvent),
92
- dynapcnn_network.samna_device.get_model_source_node(),
93
- samna.graph.JitMemberSelect(),
94
- samna.graph.JitDvsEventToViz(samna.ui.Event),
95
- "VizEventStreamer",
96
- ]
97
- )
76
+ if device_name in [
77
+ "speck2e",
78
+ "speck2edevkit",
79
+ "speck2fmodule",
80
+ ]:
81
+
82
+ dynapcnn_network.to(device=device_name)
83
+
84
+ visualizer = DynapcnnVisualizer(
85
+ dvs_shape=dvs_shape, spike_collection_interval=spike_collection_interval
86
+ )
87
+ visualizer.create_visualizer_process(visualizer_id=visualizer_id)
88
+
89
+ streamer_graph = samna.graph.EventFilterGraph()
90
+ # Streamer graph
91
+ # Dvs node
92
+ (_, dvs_member_filter, _, streamer_node) = streamer_graph.sequential(
93
+ [
94
+ # samna.graph.JitSource(samna.speck2e.event.OutputEvent),
95
+ dynapcnn_network.samna_device.get_model_source_node(),
96
+ samna.graph.JitMemberSelect(),
97
+ samna.graph.JitDvsEventToViz(samna.ui.Event),
98
+ "VizEventStreamer",
99
+ ]
100
+ )
@@ -105,3 +105,18 @@ def test_2dcnn_network():
105
105
  nir_graph = to_nir(orig_model, torch.rand(1, 2, 10, 10))
106
106
 
107
107
  loaded_model = from_nir(nir_graph, batch_size=1)
108
+
109
+
110
+ def test_conv1d():
111
+ batch_size = 2
112
+ conv1d = nn.Conv1d(16, 16, 3)
113
+ graph = to_nir(conv1d, torch.randn(batch_size, 16, 32))
114
+ converted = from_nir(graph, batch_size=batch_size)
115
+
116
+ assert len(graph.nodes) == 1 + 2
117
+ assert isinstance(graph.nodes["model"], nir.Conv1d)
118
+ assert len(graph.edges) == 0 + 2
119
+ assert conv1d.kernel_size == converted.model.kernel_size
120
+ assert conv1d.stride == converted.model.stride
121
+ assert conv1d.padding == converted.model.padding
122
+ assert conv1d.dilation == converted.model.dilation
@@ -1 +0,0 @@
1
- {"git_version": "8b87dc3", "is_release": false}
File without changes