sinabs 3.0.3.dev1__py3-none-any.whl → 3.0.4.dev2__py3-none-any.whl
- sinabs/__init__.py +8 -2
- sinabs/backend/dynapcnn/dynapcnn_network.py +2 -2
- sinabs/backend/dynapcnn/mapping.py +4 -1
- sinabs/utils.py +61 -1
- sinabs/validate_memory_speck.py +144 -0
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/METADATA +1 -1
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/RECORD +12 -11
- sinabs-3.0.4.dev2.dist-info/pbr.json +1 -0
- sinabs-3.0.3.dev1.dist-info/pbr.json +0 -1
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/AUTHORS +0 -0
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/LICENSE +0 -0
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/WHEEL +0 -0
- {sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/top_level.txt +0 -0
sinabs/__init__.py
CHANGED
@@ -2,9 +2,15 @@ from pbr.version import VersionInfo
 
 __version__ = VersionInfo("sinabs").release_string()
 
-from . import conversion, utils
+from . import conversion, utils, validate_memory_speck
 from .from_torch import from_model
 from .network import Network
 from .nir import from_nir, to_nir
 from .synopcounter import SNNAnalyzer, SynOpCounter
-from .utils import
+from .utils import (
+    reset_states,
+    set_batch_size,
+    zero_grad,
+    validate_memory_mapping_speck,
+)
+from .validate_memory_speck import ValidateMapping
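
With this hunk the new helpers become importable from the package root. A quick sketch (assumes the 3.0.4.dev2 wheel is installed):

    import sinabs

    sinabs.validate_memory_mapping_speck  # helper re-exported from sinabs.utils
    sinabs.ValidateMapping                # class re-exported from sinabs.validate_memory_speck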

sinabs/backend/dynapcnn/dynapcnn_network.py
CHANGED

@@ -361,8 +361,8 @@ class DynapcnnNetwork(nn.Module):
         try:
             _, is_compatible = self._make_config(device=device_type)
         except ValueError as e:
-            # Catch "No valid mapping found" error
-            if e.args[0]
+            # Catch "No valid mapping found" error; it is the first sentence in the string
+            if e.args[0].find("No valid mapping found.") == 0:
                 return False
             else:
                 raise e

sinabs/backend/dynapcnn/mapping.py
CHANGED

@@ -188,5 +188,8 @@ def recover_mapping(graph, layer_mapping) -> List[Tuple[int, int]]:
             if edge.flow == 1:
                 mapping.append((i, edge.t - len(layer_mapping) - 1))
     if len(mapping) != len(layer_mapping):
-        raise ValueError(
+        raise ValueError(
+            "No valid mapping found. "
+            "For Speck family you can use `utils.validate_memory_mapping_speck()` to get more information."
+        )
     return mapping
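
The message added here is the contract that DynapcnnNetwork.is_compatible_with() (previous hunk) matches against: the error is only treated as "not mappable" when the message begins with the exact sentence "No valid mapping found.". A minimal sketch of that contract, with `err` standing in for the ValueError raised by recover_mapping():

    err = ValueError(
        "No valid mapping found. "
        "For Speck family you can use `utils.validate_memory_mapping_speck()` to get more information."
    )
    assert err.args[0].find("No valid mapping found.") == 0   # the form used above
    assert err.args[0].startswith("No valid mapping found.")  # equivalent, arguably clearer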
sinabs/utils.py
CHANGED
@@ -1,10 +1,11 @@
-from typing import List
+from typing import List, Tuple
 
 import numpy as np
 import torch
 import torch.nn as nn
 
 import sinabs
+from .validate_memory_speck import ValidateMapping
 
 
 def reset_states(model: nn.Module) -> None:
@@ -179,3 +180,62 @@ def set_batch_size(model: nn.Module, batch_size: int):
         if isinstance(mod, sinabs.layers.SqueezeMixin):
             mod.batch_size = batch_size
             # reset_states(mod)
+
+
+def validate_memory_mapping_speck(
+    input_feature_size: int,
+    output_feature_size: int,
+    kernel_size: Tuple[int, int],
+    stride: Tuple[int, int],
+    padding: Tuple[int, int],
+    input_dimension: Tuple[int, int] = [64, 64],
+    conv_2d: bool = True,
+):
+    """Helper function to verify whether a specific layer can be mapped onto Speck.
+
+    This function validates kernel and neuron memories. It does not check all the network layers together.
+    It considers the mapping of a single Conv2D layer only.
+
+    Args:
+        input_feature_size (int): number of input channels
+        output_feature_size (int): number of output channels
+        kernel_size (Tuple[int, int]): 2D kernel size
+        stride (Tuple[int, int]): 2D stride size
+        padding (Tuple[int, int]): 2D padding size
+        input_dimension (Tuple[int, int]): 2D input dimension size. Defaults to [64, 64]
+        conv_2d (bool): whether a Conv2D layer is being mapped. Defaults to True. Other layer types are not validated.
+
+    Returns:
+        msg (str): message indicating the layer can be mapped, with the total size of kernel and neuron memories.
+
+    Raises:
+        Exception: if the neuron or kernel memory exceeds what is available on chip.
+    """
+
+    if not conv_2d:
+        raise ValueError("This function only validates Conv2D layers.")
+
+    validate = ValidateMapping(
+        input_feature_size,
+        output_feature_size,
+        kernel_size,
+        stride,
+        padding,
+        [input_dimension[0], input_dimension[1]],
+        conv_2d,
+    )
+    (
+        kernel,
+        neuron,
+        kernel_error_msg,
+        neuron_error_msg,
+    ) = validate.calculate_total_memory()
+
+    if kernel_error_msg != "" or neuron_error_msg != "":
+        raise Exception(kernel_error_msg + neuron_error_msg)
+    else:
+        msg = (
+            "Layer can be mapped successfully. "
+            f"Kernel memory is {kernel:g}Ki and neuron memory is {neuron:g}Ki."
+        )
+
+    return msg
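
A minimal usage sketch of the new helper, with illustrative layer parameters (a 3x3 convolution from 16 to 16 channels, stride 1, padding 1, on a 32x32 input; the values are chosen only for the example):

    from sinabs.utils import validate_memory_mapping_speck

    msg = validate_memory_mapping_speck(
        input_feature_size=16,
        output_feature_size=16,
        kernel_size=(3, 3),
        stride=(1, 1),
        padding=(1, 1),
        input_dimension=[32, 32],
    )
    print(msg)
    # -> "Layer can be mapped successfully. Kernel memory is 4Ki and neuron memory is 16Ki."
    # (calculate_total_memory() also print()s the two empty error strings first.)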

sinabs/validate_memory_speck.py
ADDED

@@ -0,0 +1,144 @@
+from typing import Tuple
+from matplotlib import pyplot as plt
+from matplotlib import colors
+
+import numpy as np
+
+
+class ValidateMapping:
+    def __init__(
+        self,
+        input_feature_size: int,
+        output_feature_size: int,
+        kernel_size: Tuple[int, int],
+        stride: Tuple[int, int],
+        padding: Tuple[int, int],
+        input_dimension: Tuple[int, int] = [64, 64],
+        conv_2d: bool = True,
+    ):
+        self.input_feature_size = input_feature_size
+        self.output_feature_size = output_feature_size
+
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.input_dimension = input_dimension
+
+        # - If not a Conv2D layer, assume it is an AvgPool2D layer
+        if not conv_2d:
+            if (
+                kernel_size[0] != kernel_size[1]
+                or kernel_size[0] == 3
+                or kernel_size[0] > 4
+            ):
+                raise Exception(
+                    "Kernel size is limited to 1x1, 2x2 or 4x4 for AvgPool2D layer."
+                )
+
+        if (
+            len(kernel_size) > 2
+            or len(stride) > 2
+            or len(padding) > 2
+            or len(input_dimension) > 2
+        ):
+            raise Exception(
+                "We expect input dimension, kernel, stride and padding to be 2D elements, "
+                "i.e., to have only two positions: x and y."
+            )
+
+        if kernel_size[0] > 16 or kernel_size[1] > 16:
+            raise Exception("Kernel size is limited to, at most, 16x16.")
+
+        if output_feature_size > 1024:
+            raise Exception("Output feature size is limited to, at most, 1024.")
+
+        if not self.check_stride():
+            raise Warning("Kernel stride can be 1, 2, 4 or 8 and, at most, 8x8.")
+
+    def calculate_total_memory(self):
+        kernel_memory = self.calculate_kernel_memory()
+        neuron_memory = self.calculate_neuron_memory()
+
+        kernel_error_msg = self.verify_combined_memories(
+            "kernel", kernel_memory, "neuron", neuron_memory
+        )
+        neuron_error_msg = self.verify_combined_memories(
+            "neuron", neuron_memory, "kernel", kernel_memory
+        )
+
+        print(kernel_error_msg)
+        print(neuron_error_msg)
+        return (
+            kernel_memory / 1024,
+            neuron_memory / 1024,
+            kernel_error_msg,
+            neuron_error_msg,
+        )
+
+    def calculate_kernel_memory(self):
+        return self.input_feature_size * pow(
+            2,
+            np.ceil(np.log2(self.kernel_size[0] * self.kernel_size[1]))
+            + np.ceil(np.log2(self.output_feature_size)),
+        )
+
+    def calculate_neuron_memory(self):
+        fx = (
+            (self.input_dimension[0] - self.kernel_size[0] + 2 * self.padding[0])
+            / self.stride[0]
+        ) + 1
+        fy = (
+            (self.input_dimension[1] - self.kernel_size[1] + 2 * self.padding[1])
+            / self.stride[1]
+        ) + 1
+        return self.output_feature_size * pow(
+            2, (np.ceil(np.log2(fx)) + np.ceil(np.log2(fy)))
+        )
+
+    def check_stride(self):
+        # Every stride component must be one of the supported values.
+        for i in range(len(self.stride)):
+            if (
+                self.stride[i] != 1
+                and self.stride[i] != 2
+                and self.stride[i] != 4
+                and self.stride[i] != 8
+            ):
+                return False
+        return True
+
+    def verify_combined_memories(
+        self, base_name: str, base_memory: int, compared_name: str, compared_memory: int
+    ):
+        # core ids --------- kernel memory -------- neuron memory
+        # [0, 1, 2] ------------- 16Ki ----------------- 64Ki
+        # [3, 4] ---------------- 32Ki ----------------- 32Ki
+        # [5, 6] ---------------- 64Ki ----------------- 16Ki
+        # [7, 8] ---------------- 16Ki ----------------- 16Ki
+
+        base_memory = base_memory / 1024
+        compared_memory = compared_memory / 1024
+
+        error_msg = ""
+        if base_memory > 64:
+            error_msg = (
+                f"{base_name.capitalize()} memory is {base_memory:g}Ki and cannot be mapped on chip. "
+                f"{base_name.capitalize()} memory on chip needs to be at most 64Ki."
+            )
+
+        if base_memory > 16 and base_memory <= 32:
+            if compared_memory > 32:
+                error_msg = (
+                    "There is no core on chip to fit neuron and kernel memories. "
+                    f"When {base_name} memory is higher than 16Ki, {compared_name} memory needs to be at most 32Ki. "
+                    f"{base_name.capitalize()} is {base_memory:g}Ki and {compared_name} is {compared_memory:g}Ki."
+                )
+
+        if base_memory > 32:
+            if compared_memory > 16:
+                error_msg = (
+                    "There is no core on chip to fit neuron and kernel memories. "
+                    f"When {base_name} memory is higher than 32Ki, {compared_name} memory needs to be at most 16Ki. "
+                    f"{base_name.capitalize()} is {base_memory:g}Ki and {compared_name} is {compared_memory:g}Ki."
+                )
+
+        return error_msg
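
Both memory formulas round each factor up to the next power of two. A worked example for the same illustrative layer as above (16 -> 16 channels, 3x3 kernel, stride 1, padding 1, 32x32 input):

    import numpy as np

    # Kernel memory: input_channels * 2^(ceil(log2(kx*ky)) + ceil(log2(output_channels)))
    kernel = 16 * 2 ** (np.ceil(np.log2(3 * 3)) + np.ceil(np.log2(16)))
    # ceil(log2 9) = 4, ceil(log2 16) = 4  ->  16 * 2^8 = 4096 -> 4Ki

    # Neuron memory: output_channels * 2^(ceil(log2(fx)) + ceil(log2(fy)))
    fx = (32 - 3 + 2 * 1) / 1 + 1  # 32.0 output columns
    fy = (32 - 3 + 2 * 1) / 1 + 1  # 32.0 output rows
    neuron = 16 * 2 ** (np.ceil(np.log2(fx)) + np.ceil(np.log2(fy)))
    # 16 * 2^10 = 16384 -> 16Ki

    print(kernel / 1024, neuron / 1024)  # 4.0 16.0
    # 4Ki kernel / 16Ki neuron fits, e.g., cores [0, 1, 2] in the table above.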

{sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-sinabs/__init__.py,sha256=
+sinabs/__init__.py,sha256=vvWzU_lrsSob2JpOdG7ok8Rm0FjR-v7ogYgK6zruAGk,458
 sinabs/cnnutils.py,sha256=MTVTmTnLYMiDQZozfgH7UhMCYQPpOto0vDa0kMjADiA,2406
 sinabs/conversion.py,sha256=k9pNqOtmA4JhKXIyGoFY6Fl8jyZp7DtlmpS4ym8pN0w,2023
 sinabs/from_torch.py,sha256=L_n7BRj7llKI-4Er1cPOZo6VVoInxehMk1PVlDiFIt8,4957
@@ -6,7 +6,8 @@ sinabs/hooks.py,sha256=7jK44SxPcnQhWScvML9QIXyX5sfA_1E-lHpYjz2_4qc,16197
 sinabs/network.py,sha256=If6Qz2MDEpHPHD0bEStDyCif1EUw4ef3CXUQr4og9rA,9489
 sinabs/nir.py,sha256=r72RZ2WNuhnHvQ2MaSJ04J-Bes1mAdzoU9LKbJupZzE,8695
 sinabs/synopcounter.py,sha256=ZF7f9Au-j5wC3gPixWxj4yq8v8KdjDUMJWExyKi6Y5s,12759
-sinabs/utils.py,sha256=
+sinabs/utils.py,sha256=wNFwlf03SOYAEgdZiA1cxAf1XEnRXlycutEu2uhDBSQ,7696
+sinabs/validate_memory_speck.py,sha256=8uByUV9VNHncbYX4Wxq3qaepwfp-tH3c2MZeHRZSUeU,5134
 sinabs/activation/__init__.py,sha256=cHXmIvV9fYZhqKVVTzD2F1a6KQklJPgTgDFjNzIEII8,311
 sinabs/activation/quantize.py,sha256=AzrIQbIlSPoiPgueC4XkRGNSeNoU5V9B7mtXbq0Kzk8,1166
 sinabs/activation/reset_mechanism.py,sha256=aKtQFxB8WqzuSir0NocdkqTF_YD7E365QBj66g3wQvE,1419
@@ -20,12 +21,12 @@ sinabs/backend/dynapcnn/crop2d.py,sha256=-FKOQHdx8GjEXK64OlWZyc1GId4FFRptVnt80jL
 sinabs/backend/dynapcnn/discretize.py,sha256=PyamxI-AoekChUZa_nCKatVre-gXBFWqnzPDcvnDQh4,13475
 sinabs/backend/dynapcnn/dvs_layer.py,sha256=Aauw7u7IJvtUkjOpYo1snqzVyFns6DZ5bmZGaj0Y7pA,9468
 sinabs/backend/dynapcnn/dynapcnn_layer.py,sha256=53u_7NqlNqJxTjrOeEOz4WABrYLtmclaN8sAwaSfr9Y,6702
-sinabs/backend/dynapcnn/dynapcnn_network.py,sha256
+sinabs/backend/dynapcnn/dynapcnn_network.py,sha256=-fKHomA28pEUodTyzfRfQ_SaHwKaY1sbWarTWOWiwTc,20594
 sinabs/backend/dynapcnn/dynapcnn_visualizer.py,sha256=MRewU6519dAtAMxf-JlFBrlynJTZeLiDfB0d85-mMFQ,24262
 sinabs/backend/dynapcnn/exceptions.py,sha256=hEei4gOniq3ByYXkJovlAeaUfZ8Q9BWTHTb0DJ9pHeQ,485
 sinabs/backend/dynapcnn/flipdims.py,sha256=I0I1nakrF0ngWBh-2SHHg7OkCOxotqukwHOQ45GWyCs,860
 sinabs/backend/dynapcnn/io.py,sha256=1AN4CcixXM1PFZ6U3LeNGQ71ajSXaV3lTyq__j4sxns,9952
-sinabs/backend/dynapcnn/mapping.py,sha256=
+sinabs/backend/dynapcnn/mapping.py,sha256=h63M9hhI6enZtQyq-may9hU7YcnGNCumiHalh8ohMno,6108
 sinabs/backend/dynapcnn/specksim.py,sha256=UKh_lH_yHIZaYEONWsAOChrD-vBdSawBxRBeDHlSv84,17138
 sinabs/backend/dynapcnn/utils.py,sha256=LYihrBIiPTDLUwsUikVOzWva4RwPc27LoHRCUDlDm-4,17934
 sinabs/backend/dynapcnn/chips/__init__.py,sha256=zJQ7f7bp_cF0US1pZ8ga4-3Bo32T0GB9gD2RN3uKlsM,130
@@ -49,10 +50,10 @@ sinabs/layers/to_spike.py,sha256=97ar-tiDZCgckBLdnKoHzm8PjTFwDXra0weOFgAf6_4,331
 sinabs/layers/functional/__init__.py,sha256=v0c7DHizKg8jfelmFYeMMg9vDafKvzoenakc4SPpj84,91
 sinabs/layers/functional/alif.py,sha256=ycJ7rlcBAd-lq5GCDZrcNPeV-7fztt3uy43XhBtTKHI,4599
 sinabs/layers/functional/lif.py,sha256=QRjiWDCBaJFk4J7RRMgktMaLCyN6xEXAKvC9Bu_PICU,4259
-sinabs-3.0.
-sinabs-3.0.
-sinabs-3.0.
-sinabs-3.0.
-sinabs-3.0.
-sinabs-3.0.
-sinabs-3.0.
+sinabs-3.0.4.dev2.dist-info/AUTHORS,sha256=jdt0oxfM_OW0_e5-ptxORAJ8U0uTzZjaB-F5iF2i50E,1802
+sinabs-3.0.4.dev2.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+sinabs-3.0.4.dev2.dist-info/METADATA,sha256=CPy1BrpEHY7Zxwfbgv_2urrXvkFQPdhPHXhA7C2a1fM,3656
+sinabs-3.0.4.dev2.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+sinabs-3.0.4.dev2.dist-info/pbr.json,sha256=8LH2b1zi84SKVbeJ7S_QPrZtjpk9mw97-KCSPwQVctM,47
+sinabs-3.0.4.dev2.dist-info/top_level.txt,sha256=QOXGzf0ZeDjRnJ9OgAjkk6h5jrh66cwrwvtPJTyfDk8,7
+sinabs-3.0.4.dev2.dist-info/RECORD,,

sinabs-3.0.4.dev2.dist-info/pbr.json
ADDED

@@ -0,0 +1 @@
+{"git_version": "fbef598", "is_release": false}

sinabs-3.0.3.dev1.dist-info/pbr.json
REMOVED

@@ -1 +0,0 @@
-{"git_version": "b584589", "is_release": false}

{sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/AUTHORS
RENAMED
File without changes

{sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/LICENSE
RENAMED
File without changes

{sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/WHEEL
RENAMED
File without changes

{sinabs-3.0.3.dev1.dist-info → sinabs-3.0.4.dev2.dist-info}/top_level.txt
RENAMED
File without changes