sinabs 3.0.4.dev2__py3-none-any.whl → 3.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sinabs/activation/reset_mechanism.py +3 -3
- sinabs/activation/surrogate_gradient_fn.py +4 -4
- sinabs/backend/dynapcnn/__init__.py +5 -4
- sinabs/backend/dynapcnn/chip_factory.py +33 -61
- sinabs/backend/dynapcnn/chips/dynapcnn.py +182 -86
- sinabs/backend/dynapcnn/chips/speck2e.py +6 -5
- sinabs/backend/dynapcnn/chips/speck2f.py +6 -5
- sinabs/backend/dynapcnn/config_builder.py +39 -59
- sinabs/backend/dynapcnn/connectivity_specs.py +48 -0
- sinabs/backend/dynapcnn/discretize.py +91 -156
- sinabs/backend/dynapcnn/dvs_layer.py +59 -101
- sinabs/backend/dynapcnn/dynapcnn_layer.py +185 -119
- sinabs/backend/dynapcnn/dynapcnn_layer_utils.py +335 -0
- sinabs/backend/dynapcnn/dynapcnn_network.py +602 -326
- sinabs/backend/dynapcnn/dynapcnnnetwork_module.py +370 -0
- sinabs/backend/dynapcnn/exceptions.py +122 -3
- sinabs/backend/dynapcnn/io.py +51 -91
- sinabs/backend/dynapcnn/mapping.py +111 -75
- sinabs/backend/dynapcnn/nir_graph_extractor.py +877 -0
- sinabs/backend/dynapcnn/sinabs_edges_handler.py +1024 -0
- sinabs/backend/dynapcnn/utils.py +214 -459
- sinabs/backend/dynapcnn/weight_rescaling_methods.py +53 -0
- sinabs/conversion.py +2 -2
- sinabs/from_torch.py +23 -1
- sinabs/hooks.py +38 -41
- sinabs/layers/alif.py +16 -16
- sinabs/layers/crop2d.py +2 -2
- sinabs/layers/exp_leak.py +1 -1
- sinabs/layers/iaf.py +11 -11
- sinabs/layers/lif.py +9 -9
- sinabs/layers/neuromorphic_relu.py +9 -8
- sinabs/layers/pool2d.py +5 -5
- sinabs/layers/quantize.py +1 -1
- sinabs/layers/stateful_layer.py +10 -7
- sinabs/layers/to_spike.py +9 -9
- sinabs/network.py +14 -12
- sinabs/synopcounter.py +10 -7
- sinabs/utils.py +155 -7
- sinabs/validate_memory_speck.py +0 -5
- {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/METADATA +16 -6
- sinabs-3.1.0.dist-info/RECORD +65 -0
- {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/WHEEL +1 -1
- {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info/licenses}/AUTHORS +1 -0
- sinabs-3.1.0.dist-info/pbr.json +1 -0
- sinabs-3.0.4.dev2.dist-info/RECORD +0 -59
- sinabs-3.0.4.dev2.dist-info/pbr.json +0 -1
- {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info/licenses}/LICENSE +0 -0
- {sinabs-3.0.4.dev2.dist-info → sinabs-3.1.0.dist-info}/top_level.txt +0 -0
--- a/sinabs/backend/dynapcnn/config_builder.py
+++ b/sinabs/backend/dynapcnn/config_builder.py
@@ -1,9 +1,9 @@
 from abc import ABC, abstractmethod
-from typing import List
+from typing import Dict, List

 import samna

-from .
+from .dynapcnn_layer import DynapcnnLayer
 from .mapping import LayerConstraints, get_valid_mapping


@@ -13,34 +13,35 @@ class ConfigBuilder(ABC):
     def get_samna_module(self):
         """Get the samna parent module that hosts all the appropriate sub-modules and classes.

-        Returns
-        -------
-        samna module
+        Returns:
+            samna module
         """

     @classmethod
     @abstractmethod
     def get_default_config(cls):
         """
-        Returns
-        -------
-        Returns the default configuration for the device type
+        Returns:
+            Default configuration for the device type
         """

     @classmethod
     @abstractmethod
-    def build_config(
+    def build_config(
+        cls,
+        layers: Dict[int, DynapcnnLayer],
+        layer2core_map: Dict[int, int],
+        destination_map: Dict[int, List[int]],
+    ):
         """Build the configuration given a model.

-
-
-
-
-
-        Chip layers where the given model layers are to be mapped.
+        Args:
+            layers (Dict): Keys are layer indices, values are DynapcnnLayer instances.
+            layer2core_map (Dict): Keys are layer indices, values are corresponding
+                cores on hardware. Needed to map the destinations.
+            destination_map (Dict): Indices of destination layers for `layer`.

-        Returns
-        -------
+        Returns:
             Samna Configuration object
         """

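The new `build_config` signature makes the network structure explicit: instead of deriving everything from a model object, callers pass three parallel dicts. A rough sketch of the calling convention (the builder subclass, layer objects, and index values here are hypothetical placeholders, not real converted-model output):

```python
# Hypothetical shapes for the three arguments of the new build_config API.
layers = {0: dynapcnn_layer_0, 1: dynapcnn_layer_1}  # layer index -> DynapcnnLayer
layer2core_map = {0: 0, 1: 1}                        # layer index -> chip core ID
destination_map = {0: [1], 1: []}                    # layer index -> downstream layer
                                                     # indices (the marker for a network
                                                     # exit is not shown in this diff)

config = SomeChipBuilder.build_config(
    layers=layers,
    layer2core_map=layer2core_map,
    destination_map=destination_map,
)
```

`SomeChipBuilder` stands in for a concrete `ConfigBuilder` subclass, such as the chip-specific builders under `sinabs/backend/dynapcnn/chips/`.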
@@ -49,8 +50,7 @@ class ConfigBuilder(ABC):
     def get_constraints(cls) -> List[LayerConstraints]:
         """Returns the layer constraints of a the given device.

-        Returns
-        -------
+        Returns:
             List[LayerConstraints]
         """

@@ -60,43 +60,27 @@ class ConfigBuilder(ABC):
         """Enable the monitor for a given set of layers in the config object."""

     @classmethod
-    def
-    """Find a
-
-
-
-
-
-
-        Returns
-        -------
-        List of core indices corresponding to each layer of the model:
-        The index of the core on chip to which the i-th layer in the
-        model is mapped is the value of the i-th entry in the list.
+    def map_layers_to_cores(cls, layers: Dict[int, DynapcnnLayer]) -> Dict[int, int]:
+        """Find a mapping from DynapcnnLayers onto on-chip cores
+
+        Args:
+            layers: Dict with layer indices as keys and DynapcnnLayer instances as values.
+
+        Returns:
+            Dict mapping layer indices (keys) to assigned core IDs (values).
         """
-
-
-        mapping = {m[0]: m[1] for m in mapping}
-        # Check if there is a dvs layer in the model
-        num_dynapcnn_cores = len(model.sequence)
-        if isinstance(model.sequence[0], DVSLayer):
-            num_dynapcnn_cores -= 1
-        # apply the mapping
-        chip_layers_ordering = [mapping[i] for i in range(num_dynapcnn_cores)]
-        return chip_layers_ordering
+
+        return get_valid_mapping(layers, cls.get_constraints())

     @classmethod
     def validate_configuration(cls, config) -> bool:
         """Check if a given configuration is valid.

-
-
-        config:
-            Configuration object
+        Args:
+            config: Configuration object.

-        Returns
-        -------
-        True if the configuration is valid, else false
+        Returns:
+            True if the configuration is valid, else false
         """
         is_valid, message = cls.get_samna_module().validate_configuration(config)
         if not is_valid:
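`map_layers_to_cores` replaces the removed list-based helper: the old code returned `chip_layers_ordering`, a list whose i-th entry was the core for the i-th layer, while the new method returns an explicit index-to-core dict and delegates the search to `get_valid_mapping`. A small self-contained sketch of the shape change, with made-up core IDs:

```python
# Old representation: position in the list encodes the layer index.
chip_layers_ordering = [3, 0, 2]

# New representation: layer indices are explicit keys, so they no longer
# have to be contiguous or start at zero.
layer2core_map = {0: 3, 1: 0, 2: 2}

# Recovering the old ordering from the new map (only possible when the
# indices happen to be contiguous):
assert [layer2core_map[i] for i in range(len(layer2core_map))] == chip_layers_ordering
```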
@@ -127,22 +111,18 @@ class ConfigBuilder(ABC):
     def reset_states(cls, config, randomize=False):
         """Randomize or reset the neuron states.

-
-
-
-        If true, the states will be set to random initial values. Else they will be set to zero
+        Args:
+            randomize (bool): If true, the states will be set to random initial values.
+                Else they will be set to zero
         """

     @classmethod
     def set_all_v_mem_to_zeros(cls, samna_device, layer_id: int) -> None:
         """Reset all memory states to zeros.

-
-
-
-        samna device object to erase vmem memory.
-        layer_id:
-            layer index
+        Args:
+            samna_device: samna device object to erase vmem memory.
+            layer_id: layer index
         """
         mod = cls.get_samna_module()
         layer_constraint: LayerConstraints = cls.get_constraints()[layer_id]

--- /dev/null
+++ b/sinabs/backend/dynapcnn/connectivity_specs.py
@@ -0,0 +1,48 @@
+"""
+functionality : list device-independent supported connections between layers on chip
+"""
+
+from typing import Union
+
+import torch.nn as nn
+
+import sinabs.layers as sl
+
+from .dvs_layer import DVSLayer
+
+Pooling = (sl.SumPool2d, nn.AvgPool2d)
+Weight = (nn.Conv2d, nn.Linear)
+Neuron = (sl.IAFSqueeze,)
+DVS = (DVSLayer,)
+SupportedNodeTypes = (*Pooling, *Weight, *Neuron, *DVS)
+
+VALID_SINABS_EDGE_TYPES_ABSTRACT = {
+    # convoluion is always followed by a neuron layer.
+    (Weight, Neuron): "weight-neuron",
+    # Neuron layer can be followed by pooling
+    (Neuron, Pooling): "neuron-pooling",
+    # Pooling can be followed by another pooling (will be consolidated)
+    (Pooling, Pooling): "pooling-pooling",
+    # Neuron layer can be followed by weight layer of next core
+    (Neuron, Weight): "neuron-weight",
+    # Pooling can be followed by weight layer of next core
+    (Pooling, Weight): "pooling-weight",
+    # Dvs can be followed by weight layer of next core
+    (DVS, Weight): "dvs-weight",
+    # Dvs can be followed by pooling layer
+    (DVS, Pooling): "dvs-pooling",
+}
+
+# Unpack dict
+VALID_SINABS_EDGE_TYPES = {
+    (source_type, target_type): name
+    for types, name in VALID_SINABS_EDGE_TYPES_ABSTRACT.items()
+    for source_type in types[0]
+    for target_type in types[1]
+}
+
+# Only `Merge` layers are allowed to join multiple inputs
+LAYER_TYPES_WITH_MULTIPLE_INPUTS = (sl.Merge,)
+
+# Neuron and pooling layers can have their output sent to multiple cores
+LAYER_TYPES_WITH_MULTIPLE_OUTPUTS = (*Neuron, *Pooling, *DVS)
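The `VALID_SINABS_EDGE_TYPES` comprehension at the end of the new module expands each key of the abstract table, a pair of type tuples, into one entry per concrete `(source, target)` pair. The same pattern in self-contained form, with stand-in classes so it runs without sinabs installed:

```python
# Stand-ins for the real layer classes referenced in connectivity_specs.py.
class Conv2d: pass
class Linear: pass
class IAFSqueeze: pass

Weight = (Conv2d, Linear)
Neuron = (IAFSqueeze,)

abstract = {(Weight, Neuron): "weight-neuron"}

concrete = {
    (source_type, target_type): name
    for types, name in abstract.items()
    for source_type in types[0]
    for target_type in types[1]
}

# One abstract edge expands into one entry per concrete type pair:
assert concrete == {
    (Conv2d, IAFSqueeze): "weight-neuron",
    (Linear, IAFSqueeze): "weight-neuron",
}
```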
--- a/sinabs/backend/dynapcnn/discretize.py
+++ b/sinabs/backend/dynapcnn/discretize.py
@@ -19,21 +19,14 @@ def discretize_conv_spike(
     This function takes a 2D convolutional and a spiking layer and returns a
     copy of each, with discretized weights, bias and threshold.

-
-
-
-
-
-
-
-
-
-    Returns
-    -------
-    nn.Conv2d
-        Discretized copy of convolutional layer
-    sl.IAF
-        Discretized copy of spiking layer
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer.
+        spike_lyr (sl.IAF): Spiking layer.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
+        Tuple containing a discretized copy of convolutional layer and
+        a discretized copy of spiking layer.
     """
     conv_lyr_copy = deepcopy(conv_lyr)
     spike_lyr_copy = deepcopy(spike_lyr)
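A usage sketch for the out-of-place variant (assuming sinabs with the dynapcnn backend is installed; the `IAFSqueeze` constructor arguments reflect the usual sinabs API, and `min_v_mem` is set because the discretization reads the layer's lower threshold):

```python
import torch.nn as nn
import sinabs.layers as sl
from sinabs.backend.dynapcnn.discretize import discretize_conv_spike

conv = nn.Conv2d(2, 8, kernel_size=3, bias=False)
spk = sl.IAFSqueeze(batch_size=1, min_v_mem=-1.0)

# Returns discretized copies; `conv` and `spk` are left untouched.
# discretize_conv_spike_ (trailing underscore) modifies them in place instead.
conv_d, spk_d = discretize_conv_spike(conv, spk, to_int=True)
```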
@@ -48,21 +41,14 @@ def discretize_conv_spike_(
     This function takes a 2D convolutional and a spiking layer and discretizes
     weights, bias and threshold in-place.

-
-
-
-
-
-
-
-
-
-    Returns
-    -------
-    nn.Conv2d
-        Discretized convolutional layer
-    sl.IAF
-        Discretized spiking layer
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer.
+        spike_lyr (sl.IAF): Spiking layer.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
+        A tuple containing a discretized convolutional layer and a
+        discretized spiking layer.
     """

     return _discretize_conv_spk_(conv_lyr, spike_lyr, to_int=to_int)
@@ -74,28 +60,20 @@ def discretize_conv(
     spk_thr_low: float,
     spk_state: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> nn.Conv2d:
     """Discretize convolutional layer.

     This function takes a 2D convolutional layer and parameters of a subsequent
     spiking layer to return a discretized copy of the convolutional layer.

-
-
-
-
-
-
-
-
-    spk_state: torch.Tensor or None
-        State of spiking layer.
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
+    Args:
+        layer (nn.Conv2d): Convolutional layer.
+        spk_thr (float): Upper threshold of subsequent spiking layer.
+        spk_thr_low (float): Lower threshold of subsequent spiking layer.
+        spk_state (torch.Tensor): State of spiking layer. Defaults to None.
+        to_int (bool): Use integer types for discretized parameter. Defaults to True.
+
+    Returns:
         Discretized copy of convolutional layer
     """
     lyr_copy = deepcopy(layer)
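When only the convolutional layer is at hand, the thresholds of the subsequent spiking layer are passed as plain floats. A sketch along the lines of the docstring above (the threshold values are chosen arbitrarily for illustration):

```python
import torch.nn as nn
from sinabs.backend.dynapcnn.discretize import discretize_conv

conv = nn.Conv2d(2, 8, kernel_size=3)

# Thresholds of the spiking layer that would follow this convolution.
conv_d = discretize_conv(
    layer=conv, spk_thr=1.0, spk_thr_low=-1.0, spk_state=None, to_int=True
)
```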
@@ -115,28 +93,20 @@ def discretize_conv_(
     spk_thr_low: float,
     spk_state: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> nn.Conv2d:
     """Discretize convolutional layer, in-place.

     This function discretizes a 2D convolutional layer in-place, based on
     parameters of a subsequent spiking layer.

-
-
-
-
-
-
-
-
-    spk_state: torch.Tensor or None
-        State of spiking layer.
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    nn.Conv2d
+    Args:
+        layer (nn.Conv2d): Convolutional layer.
+        spk_thr (float): Upper threshold of subsequent spiking layer.
+        spk_thr_low (float): Lower threshold of subsequent spiking layer.
+        spk_state (torch.Tensor): State of spiking layer. Defaults to None.
+        to_int (bool): Use integer types for discretized parameter. Defaults to True.
+
+    Returns:
         Discretized convolutional layer
     """
     layer_discr, __ = _discretize_conv_spk_(
@@ -154,26 +124,20 @@ def discretize_spk(
     conv_weight: torch.Tensor,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> sl.IAF:
     """Discretize spiking layer.

     This function takes a spiking layer and parameters of a preceding
     convolutional layer to return a discretized copy of the spiking layer.

-
-
-
-
-
-
-
-
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    sl.IAF
+    Args:
+        layer (sl.IAF): Spiking layer.
+        conv_weight (torch.Tensor): Weight tensor of preceding convolutional layer.
+        conv_bias (torch.Tensor): Bias of preceding convolutional layer.
+            Optional argument, defaults to None.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
         Discretized copy of spiking layer
     """
     lyr_copy = deepcopy(layer)
@@ -188,26 +152,20 @@ def discretize_spk_(
     conv_weight: torch.Tensor,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> sl.IAF:
     """Discretize spiking layer in-place.

     This function discretizes a spiking layer in-place, based on parameters of a
     preceding convolutional layer.

-
-
-
-
-
-
-
-
-    to_int: bool
-        Use integer types for discretized parameter
-
-    Returns
-    -------
-    sl.IAF
+    Args:
+        layer (sl.IAF): Spiking layer.
+        conv_weight (torch.Tensor): Weight tensor of preceding convolutional layer.
+        conv_bias (torch.Tensor): Bias of preceding convolutional layer.
+            Optional argument, defaults to None.
+        to_int (bool): Use integer types for discretized parameter.
+
+    Returns:
         Discretized spiking
     """
     __, layer_discr = _discretize_conv_spk_(
@@ -225,7 +183,7 @@ def _discretize_conv_spk_(
     conv_weight: Optional[torch.Tensor] = None,
     conv_bias: Optional[torch.Tensor] = None,
     to_int: bool = True,
-):
+) -> Tuple[nn.Conv2d, sl.IAF]:
     """Discretize convolutional and spiking layer.

     Determine and apply a suitable scaling factor for weight and bias of
@@ -234,34 +192,27 @@ def _discretize_conv_spk_(
     providing layers, respective parameters can be provided directly. If a layer
     is not provided, `None` will be returned instead of its discrete version.

-
-
-
-
-
-        Spiking layer
-    spk_thr: float or None
-        Upper threshold of spiking layer. Has to be provided if `spike_lyr` is `None`.
+    Args:
+        conv_lyr (nn.Conv2d): Convolutional layer. Optional argument, defaults to None.
+        spike_lyr (sl.IAF): Spiking layer. Optional argument, defaults to None.
+        spk_thr (float): Upper threshold of spiking layer. Optional argument, defaults
+            to None. Has to be provided if `spike_lyr` is `None`.
             Is ignored otherwise.
-    spk_thr_low
-
+        spk_thr_low (float): Lower threshold of spiking layer. Optional argument,
+            defaults to None. Has to be provided if `spike_lyr` is `None`.
             Is ignored otherwise.
-    spk_state
-
-    conv_weight
-
+        spk_state (torch.Tensor): State of spiking layer. Optional argument
+            defaults to None. Ignored if `spike_lyr` is not `None`.
+        conv_weight (torch.Tensor): Weight of convolutional layer. Optional argument,
+            defaults to None. Has to be provided if `conv_lyr` is `None`.
             Is ignored otherwise.
-    conv_bias
-
-    to_int:
-
-
-
-
-    nn.Conv2d or None
-        Discretized convolutional layer if `conv_lyr` is not `None`, else `None`
-    sl.IAF or None
-        Discretized spiking layer if `spk_lyr` is not `None`, else `None`
+        conv_bias (torch.Tensor): Bias of convolutional layer. Optional argument,
+            defaults to None. Ignored if `conv_lyr` is not `None`.
+        to_int (bool): Use integer types for discretized parameters.
+
+    Returns:
+        Discretized convolutional layer if `conv_lyr` is not `None`, else `None`.
+        and discretized spiking layer if `spk_lyr` is not `None`, else `None`
     """

     if conv_lyr is None:
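The docstring spells out the helper's dispatch contract: each of the two layers can be replaced by its raw parameters, and whatever is absent comes back as `None`. A sketch of that contract (keyword names taken from the signature above; this calls a private helper, so it is for illustration only, and the `IAF` constructor arguments are an assumption about the usual sinabs API):

```python
import torch
import sinabs.layers as sl
from sinabs.backend.dynapcnn.discretize import _discretize_conv_spk_

spk = sl.IAF(min_v_mem=-1.0)
weight = torch.randn(8, 2, 3, 3)
bias = torch.zeros(8)

# No conv layer is given, so only the spiking layer comes back discretized.
conv_d, spk_d = _discretize_conv_spk_(
    conv_lyr=None, spike_lyr=spk, conv_weight=weight, conv_bias=bias
)
assert conv_d is None
```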
@@ -287,7 +238,6 @@ def _discretize_conv_spk_(
         conv_bias = torch.zeros(conv_lyr.out_channels)

     if spike_lyr is None:
-
         discr_spk = False

     if spk_thr is None or spk_thr_low is None:
@@ -361,17 +311,12 @@ def determine_discretization_scale(obj: torch.Tensor, bit_precision: int) -> float:
     Determine how much the values of a torch tensor can be scaled in order to fit
     the given precision

-
-
-
-
-
-
-
-    Returns
-    -------
-    float
-        The scaling factor
+    Args:
+        obj (torch.Tensor): Tensor that is to be scaled.
+        bit_precision (int): The precision in bits.
+
+    Returns:
+        The scaling factor
     """

     # Discrete range
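The body of `determine_discretization_scale` is not part of this diff; the following is a minimal sketch of the computation its docstring describes, assuming a symmetric two's-complement integer range for `bit_precision` bits (the package's exact formula may differ):

```python
import torch

def determine_scale_sketch(obj: torch.Tensor, bit_precision: int) -> float:
    # Representable signed-integer range, e.g. 8 bits -> [-128, 127].
    min_val = -(2 ** (bit_precision - 1))
    max_val = 2 ** (bit_precision - 1) - 1

    # Largest factor that keeps every scaled value inside that range.
    candidates = []
    if obj.max().item() > 0:
        candidates.append(max_val / obj.max().item())
    if obj.min().item() < 0:
        candidates.append(min_val / obj.min().item())
    return min(candidates) if candidates else 1.0
```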
@@ -398,19 +343,14 @@ def discretize_tensor(
 ) -> torch.Tensor:
     """Scale a torch.Tensor and cast it to discrete integer values.

-
-
-
-
-
-
-
-
-
-    Returns
-    -------
-    torch.Tensor
-        Scaled and discretized copy of `obj`.
+    Args:
+        obj (torch.Tensor): Tensor that is to be discretized.
+        scaling (float): Scaling factor to be applied before discretization.
+        to_int (bool): If False, round the values, but don't cast to Int.
+            Defaults to True.
+
+    Returns:
+        Scaled and discretized copy of `obj`.
     """

     # Scale the values
@@ -428,17 +368,12 @@
 def discretize_scalar(obj: float, scaling: float) -> int:
     """Scale a float and cast it to discrete integer values.

-
-
-
-
-
-
-
-    Returns
-    -------
-    int
-        Scaled and discretized copy of `obj`.
+    Args:
+        obj (float): Value that is to be discretized.
+        scaling (float): Scaling factor to be applied before discretization.
+
+    Returns:
+        Scaled and discretized copy of `obj`.
     """

     # Scale the values
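Per the two docstrings, discretization is a scale, a round, and (optionally) a cast. A behavioural sketch of both helpers, not the package's actual implementation:

```python
import torch

def discretize_tensor_sketch(obj: torch.Tensor, scaling: float, to_int: bool = True) -> torch.Tensor:
    # Scale the values, then round to the nearest integer.
    scaled = (obj * scaling).round()
    # With to_int=False the values are rounded but keep their float dtype.
    return scaled.int() if to_int else scaled

def discretize_scalar_sketch(obj: float, scaling: float) -> int:
    return int(round(obj * scaling))
```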