da4ml 0.2.0-py3-none-any.whl → 0.3.0-py3-none-any.whl
- da4ml/_version.py +2 -2
- da4ml/cmvm/api.py +2 -6
- da4ml/cmvm/core/__init__.py +0 -1
- da4ml/cmvm/types.py +99 -19
- da4ml/codegen/__init__.py +5 -4
- da4ml/codegen/cpp/__init__.py +2 -1
- da4ml/codegen/cpp/cpp_codegen.py +58 -25
- da4ml/codegen/cpp/hls_model.py +252 -0
- da4ml/codegen/cpp/source/ap_types/ap_binary.h +78 -0
- da4ml/codegen/cpp/source/ap_types/ap_common.h +376 -0
- da4ml/codegen/cpp/source/ap_types/ap_decl.h +212 -0
- da4ml/codegen/cpp/source/ap_types/ap_fixed.h +360 -0
- da4ml/codegen/cpp/source/ap_types/ap_fixed_base.h +2354 -0
- da4ml/codegen/cpp/source/ap_types/ap_fixed_ref.h +718 -0
- da4ml/codegen/cpp/source/ap_types/ap_fixed_special.h +230 -0
- da4ml/codegen/cpp/source/ap_types/ap_int.h +330 -0
- da4ml/codegen/cpp/source/ap_types/ap_int_base.h +1885 -0
- da4ml/codegen/cpp/source/ap_types/ap_int_ref.h +1346 -0
- da4ml/codegen/cpp/source/ap_types/ap_int_special.h +223 -0
- da4ml/codegen/cpp/source/ap_types/ap_shift_reg.h +138 -0
- da4ml/codegen/cpp/source/ap_types/etc/ap_private.h +7199 -0
- da4ml/codegen/cpp/source/ap_types/hls_math.h +27 -0
- da4ml/codegen/cpp/source/ap_types/hls_stream.h +263 -0
- da4ml/codegen/cpp/source/ap_types/utils/x_hls_utils.h +80 -0
- da4ml/codegen/cpp/source/binder_util.hh +56 -0
- da4ml/codegen/cpp/source/build_binder.mk +24 -0
- da4ml/codegen/cpp/source/{vitis.h → vitis_bitshift.hh} +1 -1
- da4ml/codegen/verilog/__init__.py +2 -3
- da4ml/codegen/verilog/comb.py +65 -24
- da4ml/codegen/verilog/io_wrapper.py +36 -141
- da4ml/codegen/verilog/pipeline.py +21 -3
- da4ml/codegen/verilog/source/binder_util.hh +72 -0
- da4ml/codegen/verilog/source/build_prj.tcl +0 -1
- da4ml/codegen/verilog/source/mux.v +58 -0
- da4ml/codegen/verilog/source/negative.v +28 -0
- da4ml/codegen/verilog/source/shift_adder.v +4 -1
- da4ml/codegen/verilog/source/template.xdc +3 -0
- da4ml/codegen/verilog/verilog_model.py +42 -15
- da4ml/converter/__init__.py +0 -0
- da4ml/converter/hgq2/parser.py +105 -0
- da4ml/converter/hgq2/replica.py +383 -0
- da4ml/trace/__init__.py +2 -2
- da4ml/trace/fixed_variable.py +177 -18
- da4ml/trace/fixed_variable_array.py +124 -9
- da4ml/trace/ops/__init__.py +22 -6
- da4ml/trace/ops/conv_utils.py +146 -14
- da4ml/trace/ops/einsum_utils.py +9 -6
- da4ml/trace/ops/reduce_utils.py +103 -0
- da4ml/trace/pipeline.py +36 -34
- da4ml/trace/tracer.py +37 -5
- da4ml-0.3.0.dist-info/METADATA +107 -0
- da4ml-0.3.0.dist-info/RECORD +64 -0
- da4ml/codegen/cpp/source/vitis_bridge.h +0 -17
- da4ml-0.2.0.dist-info/METADATA +0 -65
- da4ml-0.2.0.dist-info/RECORD +0 -39
- /da4ml/codegen/verilog/source/{ioutils.hh → ioutil.hh} +0 -0
- {da4ml-0.2.0.dist-info → da4ml-0.3.0.dist-info}/WHEEL +0 -0
- {da4ml-0.2.0.dist-info → da4ml-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {da4ml-0.2.0.dist-info → da4ml-0.3.0.dist-info}/top_level.txt +0 -0
da4ml/converter/hgq2/parser.py ADDED
@@ -0,0 +1,105 @@
+from collections.abc import Sequence
+from dataclasses import dataclass
+from typing import Any
+
+import keras
+from keras import KerasTensor, Operation
+
+from ...trace import FixedVariableArray, HWConfig
+from ...trace.fixed_variable_array import FixedVariableArrayInput
+from .replica import _registry
+
+
+@dataclass
+class OpObj:
+    operation: Operation
+    args: list
+    kwargs: dict
+    produces: tuple[KerasTensor, ...]
+    requires: tuple[KerasTensor, ...]
+
+
+def parse_model(model: keras.Model):
+    operators: dict[int, list[OpObj]] = {}
+    for depth, nodes in model._nodes_by_depth.items():
+        _oprs = []
+        for node in nodes:
+            assert isinstance(node.operation, keras.Operation)
+            opr = OpObj(
+                operation=node.operation,
+                args=node.arguments.args,
+                kwargs=node.arguments.kwargs,
+                produces=node.outputs,
+                requires=node.arguments.keras_tensors,
+            )
+            _oprs.append(opr)
+        operators[depth] = _oprs
+    return [operators[i] for i in range(max(operators.keys()), -1, -1)]
+
+
+def replace_tensors(tensor_map: dict[KerasTensor, FixedVariableArray], obj: Any) -> Any:
+    if isinstance(obj, KerasTensor):
+        return tensor_map[obj]
+    if isinstance(obj, list):
+        return [replace_tensors(tensor_map, o) for o in obj]
+    if isinstance(obj, tuple):
+        return tuple(replace_tensors(tensor_map, o) for o in obj)
+    if isinstance(obj, dict):
+        return {k: replace_tensors(tensor_map, v) for k, v in obj.items()}
+    return obj
+
+
+def _apply_nn(
+    model: keras.Model, inputs: FixedVariableArray | Sequence[FixedVariableArray], verbose: bool = False
+) -> tuple[FixedVariableArray, ...]:
+    """
+    Apply a keras model to a fixed variable array or a sequence of fixed variable arrays.
+
+    Parameters
+    ----------
+    model : keras.Model
+        The keras model to apply.
+    inputs : FixedVariableArray or Sequence[FixedVariableArray]
+        The input fixed variable array or sequence of fixed variable arrays.
+
+    Returns
+    -------
+    tuple of FixedVariableArray
+        A tuple containing the output(s) of the model as FixedVariableArray.
+    """
+    if isinstance(inputs, FixedVariableArray):
+        inputs = (inputs,)
+
+    assert len(model.inputs) == len(inputs), f'Model has {len(model.inputs)} inputs, got {len(inputs)}'
+    tensor_map = {keras_tensor: da_tensor for keras_tensor, da_tensor in zip(model.inputs, inputs)}
+
+    for ops in parse_model(model):
+        for op in ops:
+            assert all(t in tensor_map for t in op.requires)
+            args = replace_tensors(tensor_map, op.args)
+            kwargs: dict[str, Any] = replace_tensors(tensor_map, op.kwargs)
+            if op.operation.__class__ is keras.layers.InputLayer:
+                continue
+            mirror_op = _registry[op.operation.__class__](op.operation)
+            if verbose:
+                print(f'Processing operation {op.operation.name} ({op.operation.__class__.__name__})')
+            outputs = mirror_op(*args, **kwargs)
+            for keras_tensor, da_tensor in zip(op.produces, outputs):
+                tensor_map[keras_tensor] = da_tensor
+
+    return tuple(tensor_map[keras_tensor] for keras_tensor in model.outputs)
+
+
+def trace_model(
+    model: keras.Model,
+    hwconf: HWConfig = HWConfig(1, -1, -1),
+    solver_options: dict[str, Any] | None = None,
+    verbose: bool = False,
+    inputs: tuple[FixedVariableArray, ...] | None = None,
+) -> tuple[tuple[FixedVariableArray, ...], tuple[FixedVariableArray, ...]]:
+    if inputs is None:
+        inputs = tuple(
+            FixedVariableArrayInput(inp.shape[1:], hwconf=hwconf, solver_options=solver_options) for inp in model.inputs
+        )
+    outputs = _apply_nn(model, inputs, verbose=verbose)
+    return inputs, outputs
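The new parser module gives da4ml a direct tracing entry point for HGQ/Keras models. A minimal usage sketch (not part of the diff; the model topology and layer sizes are made up for illustration, while trace_model, HWConfig, and their defaults are taken from the code above):

import keras
from hgq.layers import QDense

from da4ml.converter.hgq2.parser import trace_model
from da4ml.trace import HWConfig

# A small HGQ-quantized functional model to trace (illustrative only).
inp = keras.Input((16,))
out = QDense(4)(QDense(8)(inp))
model = keras.Model(inp, out)

# Symbolic fixed-point inputs are created internally via FixedVariableArrayInput
# when `inputs` is not given; verbose=True prints each mirrored operation.
inputs, outputs = trace_model(model, hwconf=HWConfig(1, -1, -1), verbose=True)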
da4ml/converter/hgq2/replica.py ADDED
@@ -0,0 +1,383 @@
+import typing
+from collections.abc import Sequence
+from math import prod
+from typing import Any
+
+import hgq
+import keras
+import numpy as np
+from hgq.layers import (
+    QBatchNormalization,
+    QBatchNormDense,
+    QConv1D,
+    QConv2D,
+    QConv3D,
+    QDense,
+    QEinsumDense,
+    QEinsumDenseBatchnorm,
+    QSum,
+)
+from hgq.layers.core.base import MultipleQuantizers, Quantizer
+from hgq.quantizer.internal import FixedPointQuantizerBase
+from keras.layers import ReLU
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src.layers.pooling.base_pooling import BasePooling
+from keras.src.ops.numpy import (
+    Add,
+    Concatenate,
+    Divide,
+    GetItem,
+    Moveaxis,
+    Multiply,
+    Ravel,
+    Repeat,
+    Reshape,
+    Subtract,
+    Sum,
+    Transpose,
+    TrueDivide,
+)
+
+from ...trace import FixedVariableArray
+from ...trace.ops import conv, einsum, pool, quantize, relu
+
+
+def mirror_quantizer(q: Quantizer, v: FixedVariableArray) -> FixedVariableArray:
+    q_internal: FixedPointQuantizerBase = q.quantizer
+    k, i, f = (np.array(x, dtype=np.int8)[0] for x in q_internal.kif)
+    round_mode, overflow_mode = q_internal.round_mode, q_internal.overflow_mode
+    return quantize(v, k, i, f, overflow_mode=overflow_mode, round_mode=round_mode)
+
+
+_registry: dict[type, 'type[MirrorOperationBase]'] = {}
+
+
+class MirrorOperationMeta(type):
+    def __new__(mcs, name: str, bases: tuple[type, ...], namespace: dict[str, typing.Any]):
+        cls = super().__new__(mcs, name, bases, namespace)
+        if name == 'MirrorOperationBase':
+            return cls
+
+        handles: type | tuple[type, ...] = namespace['handles']
+        if not isinstance(handles, tuple):
+            handles = (handles,)
+
+        for handle in handles:
+            _registry[handle] = cls  # type: ignore
+        return cls
+
+
+class MirrorOperationBase(metaclass=MirrorOperationMeta):
+    handles: tuple[type, ...] = ()
+
+    def __init__(self, layer: 'keras.Operation'):
+        assert isinstance(layer, self.handles)
+        self.op: Any = layer
+
+    def call(self, *args, **kwargs) -> tuple[FixedVariableArray, ...] | FixedVariableArray: ...
+
+    def __call__(self, *args, **kwargs) -> tuple[FixedVariableArray, ...]:
+        assert all(not isinstance(a, FixedVariableArray) for a in kwargs.values())
+        assert all(isinstance(a, FixedVariableArray) or isinstance(a, Sequence) for a in args)
+        inputs = args[0] if len(args) == 1 else args
+
+        if not isinstance(self.op, hgq.layers.QLayerBase):
+            r = self.call(*args, **kwargs)
+            return r if isinstance(r, tuple) else (r,)
+
+        layer: hgq.layers.QLayerBase = self.op
+        assert kwargs.pop('training', False) is False, 'Training mode is not supported in mirror operation'
+        assert kwargs.pop('mask', None) is None, 'Masking is not supported in mirror operation'
+
+        if layer.enable_iq:
+            if isinstance(inputs, Sequence):
+                assert isinstance(layer.iq, MultipleQuantizers)
+                inputs = tuple(mirror_quantizer(q, v) for q, v in zip(layer.iq.quantizers, inputs))
+            else:
+                assert isinstance(layer.iq, Quantizer), f'Expected iq to be a Quantizer, got {type(layer.iq)}'
+                inputs = mirror_quantizer(layer.iq, inputs)
+
+        outputs = self.call(inputs, **kwargs)
+
+        activation = getattr(layer, 'activation', keras.activations.linear)
+        if activation is not keras.activations.linear:
+            if activation is keras.activations.relu:
+                if isinstance(outputs, tuple):
+                    assert len(outputs) == 1, 'ReLU activation is expected to have a single output'
+                    outputs = (relu(outputs[0]),)
+                else:
+                    outputs = relu(outputs)
+            else:
+                raise NotImplementedError(f'Activation {activation} is not supported in mirror operation')
+
+        if layer.enable_oq:
+            if isinstance(outputs, tuple):
+                assert isinstance(layer.oq, MultipleQuantizers)
+                outputs = tuple(mirror_quantizer(q, v) for q, v in zip(layer.oq.quantizers, outputs))
+            else:
+                assert isinstance(layer.oq, Quantizer)
+                outputs = mirror_quantizer(layer.oq, outputs)
+
+        if isinstance(outputs, FixedVariableArray):
+            outputs = (outputs,)
+
+        return outputs
+
+
+class MirrorQuantizer(MirrorOperationBase):
+    handles = (Quantizer,)
+
+    def __init__(self, op: 'Quantizer'):
+        super().__init__(op)
+        assert isinstance(op.quantizer, FixedPointQuantizerBase)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        return mirror_quantizer(self.op, inputs)
+
+
+class MirrorQDense(MirrorOperationBase):
+    handles = (QDense, QEinsumDense, QEinsumDenseBatchnorm, QBatchNormDense, QBatchNormalization, keras.layers.EinsumDense)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        op = self.op
+        if isinstance(op, (QDense, QBatchNormDense)):
+            qkernel = op.qkernel
+            qbias = op.qbias
+            eq = '...c,cC->...C'
+        elif isinstance(op, (QEinsumDense, QEinsumDenseBatchnorm)):
+            qkernel = op.qkernel
+            qbias = op.qbias
+            eq = op.equation
+        elif isinstance(op, keras.layers.EinsumDense):
+            qkernel = op.kernel
+            qbias = op.bias
+            eq = op.equation
+        elif isinstance(op, QBatchNormalization):
+            qkernel, qbias = op.qscaler_and_qoffset
+            dim = inputs._vars.ndim
+            axis = op.axis
+            assert axis != 0, 'Cannot normalize on the batch axis'
+            axis -= 1
+            idx = ''.join(chr(ord('a') + i) for i in range(dim))
+            eq = f'...{idx},{idx[axis]}->...{idx}'
+        else:
+            raise TypeError(f'Unsupported layer type: {type(op)}')
+
+        qkernel = np.array(qkernel)
+        qbias = np.array(qbias) if qbias is not None else None
+        return (einsum(eq, inputs[None], qkernel) + qbias)[0]
+
+
+class MirrorQConv(MirrorOperationBase):
+    handles = (QConv1D, QConv2D, QConv3D)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        layer: QConv1D | QConv2D | QConv3D = self.op
+        qkernel = np.array(layer.qkernel)
+        qbias = np.array(layer.qbias) if layer.qbias is not None else None
+        strides = layer.strides
+        padding = layer.padding
+        dilation_rate = layer.dilation_rate
+        groups = layer.groups
+
+        assert dilation_rate == 1 or all(d == 1 for d in dilation_rate), 'Dilation rate is not supported in mirror operation'
+        if layer.data_format == 'channels_first':
+            shape = (0,) + tuple(range(2, len(inputs.shape))) + (1,)
+            inputs = inputs.transpose(shape)
+
+        outputs = conv(inputs, qkernel, qbias, strides=strides, padding=padding, format=layer.data_format, groups=groups)
+
+        return outputs
+
+
+class MirrorReLU(MirrorOperationBase):
+    handles = (ReLU,)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        return relu(inputs)
+
+
+class MirrorReshape(MirrorOperationBase):
+    handles = (keras.layers.Reshape, keras.layers.Flatten, Reshape, Ravel)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        if isinstance(self.op, (keras.layers.Flatten, Ravel)):
+            return inputs.ravel()
+        elif isinstance(self.op, keras.layers.Reshape):
+            return inputs.reshape(self.op.target_shape)
+        elif isinstance(self.op, Reshape):
+            return inputs.reshape(self.op.newshape[1:])
+        else:
+            raise TypeError(f'Unsupported layer type: {type(self.op)}')
+
+
+class MirrorMerge(MirrorOperationBase):
+    handles = (keras.layers.Add, keras.layers.Concatenate, hgq.layers.QAdd)
+
+    def call(self, inputs: tuple[FixedVariableArray, FixedVariableArray]) -> FixedVariableArray:
+        op: keras.Operation = self.op
+        if isinstance(op, (keras.layers.Add, hgq.layers.QAdd)):
+            return inputs[0] + inputs[1]
+        elif isinstance(op, keras.layers.Concatenate):
+            axis = op.axis
+            data = np.concatenate([v._vars for v in inputs], axis=axis)
+            return FixedVariableArray(data, inputs[0].solver_options)
+        else:
+            raise TypeError(f'Unsupported layer type: {type(op)}')
+
+
+class MirrorPool(MirrorOperationBase):
+    handles = (
+        hgq.layers.QAvgPool1D,
+        hgq.layers.QAvgPool2D,
+        hgq.layers.QAvgPool3D,
+        hgq.layers.QMaxPool1D,
+        hgq.layers.QMaxPool2D,
+        hgq.layers.QMaxPool3D,
+        hgq.layers.QGlobalAveragePooling1D,
+        hgq.layers.QGlobalMaxPooling1D,
+        hgq.layers.QGlobalAveragePooling2D,
+        hgq.layers.QGlobalMaxPooling2D,
+        hgq.layers.QGlobalAveragePooling3D,
+        hgq.layers.QGlobalMaxPooling3D,
+        keras.layers.AveragePooling1D,
+        keras.layers.AveragePooling2D,
+        keras.layers.AveragePooling3D,
+        keras.layers.MaxPooling1D,
+        keras.layers.MaxPooling2D,
+        keras.layers.MaxPooling3D,
+        keras.layers.GlobalAveragePooling1D,
+        keras.layers.GlobalMaxPooling1D,
+        keras.layers.GlobalAveragePooling2D,
+        keras.layers.GlobalMaxPooling2D,
+        keras.layers.GlobalAveragePooling3D,
+        keras.layers.GlobalMaxPooling3D,
+    )
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        cname = self.op.__class__.__name__
+        if 'Max' in cname:
+            op = 'max'
+        else:
+            assert 'Average' in cname, f'Unsupported global pooling layer: {cname}'
+            op = 'avg'
+
+        data_format = self.op.data_format
+        if data_format == 'channels_first':
+            inputs = np.moveaxis(inputs, 1, -1)  # type: ignore
+
+        if isinstance(self.op, BaseGlobalPooling):
+            pool_dim = self.op.input_spec.ndim - 2  # type: ignore
+            axis = tuple(range(pool_dim))
+            keepdims = self.op.keepdims
+
+            if op == 'max':
+                out = np.amax(inputs, axis=axis, keepdims=keepdims)  # type: ignore
+            elif op == 'avg':
+                pool_size = prod(inputs.shape[:-1])
+                out = np.sum(inputs, axis=axis, keepdims=keepdims) / pool_size  # type: ignore
+        else:
+            assert isinstance(self.op, BasePooling), f'Unsupported pooling layer: {type(self.op)}'
+            pool_size = self.op.pool_size
+            strides = self.op.strides
+            padding = self.op.padding
+            pool_dim = len(pool_size)
+            out = pool(
+                inputs,
+                pool_size=pool_size,
+                strides=strides,
+                padding=padding,
+                pool_type=op,
+            )
+        if data_format == 'channels_first':
+            out = np.moveaxis(out, -1, 1)  # type: ignore
+
+        return out  # type: ignore
+
+
+class MirrorRepeatVector(MirrorOperationBase):
+    handles = (keras.layers.RepeatVector,)
+
+    def call(self, inputs: FixedVariableArray) -> FixedVariableArray:
+        layer: keras.layers.RepeatVector = self.op
+        if layer.n == 1:
+            return inputs
+        # return FixedVariableArray(np.repeat(inputs._vars, layer.n, axis=0), inputs.solver_options)
+        return np.repeat(inputs[None], layer.n, axis=0)[0]  # type: ignore
+
+
+class MirrorGetItem(MirrorOperationBase):
+    handles = (GetItem,)
+
+    def call(self, x: FixedVariableArray, key):
+        if isinstance(key, list):
+            key = tuple(key)
+        return x[None][key][0]
+
+
+class MirrorSum(MirrorOperationBase):
+    handles = (Sum,)
+
+    def call(self, x: FixedVariableArray, axis=None, keepdims=False):
+        return np.sum(x[None], axis=axis, keepdims=keepdims)[0]  # type: ignore
+
+
+class MirrorQSum(MirrorOperationBase):
+    handles = (QSum,)
+
+    def call(self, x: FixedVariableArray):
+        layer: QSum = self.op
+        axes, scale, keepdims = layer.axes, layer.scale, layer.keepdims
+        return np.sum(x[None], axis=axes, keepdims=keepdims)[0] * scale  # type: ignore
+
+
+class MirrorArithmetic(MirrorOperationBase):
+    handles = (Add, Subtract, Multiply, TrueDivide, Divide)
+
+    def call(self, x1: FixedVariableArray, x2: FixedVariableArray):
+        match self.op.__class__.__name__:
+            case 'Add':
+                return x1 + x2
+            case 'Subtract':
+                return x1 - x2
+            case 'Multiply':
+                return x1 * x2
+            case 'TrueDivide' | 'Divide':
+                return x1 / x2
+            case _:
+                raise TypeError(f'Unsupported arithmetic operation: {type(self.op)}')
+
+
+class MirrorConcatenate(MirrorOperationBase):
+    handles = (Concatenate,)
+
+    def call(self, xs: Sequence[FixedVariableArray]):
+        axis = self.op.axis
+        # return backend.numpy.concatenate(xs, axis=self.axis)
+        # return FixedVariableArray(np.concatenate([x._vars[None] for x in xs], axis=axis)[0], xs[0].solver_options)
+        return np.concatenate([x[None] for x in xs], axis=axis)[0]  # type: ignore
+
+
+class MirrorRepeat(MirrorOperationBase):
+    handles = (Repeat,)
+
+    def call(self, x: FixedVariableArray):
+        repeats, axis = self.op.repeats, self.op.axis
+        # return FixedVariableArray(np.repeat(x._vars[None], repeats, axis=axis)[0], x.solver_options)
+        return np.repeat(x[None], repeats, axis=axis)[0]  # type: ignore


+class MirrorTranspose(MirrorOperationBase):
+    handles = (Transpose,)
+
+    def call(self, x: FixedVariableArray):
+        axes = self.op.axes
+        return np.transpose(x, axes)  # type: ignore
+
+
+class MirrorMoveaxis(MirrorOperationBase):
+    handles = (Moveaxis,)
+
+    def call(self, x: FixedVariableArray):
+        source, destination = self.op.source, self.op.destination
+        return np.moveaxis(x[None], source, destination)[0]  # type: ignore
da4ml/trace/__init__.py CHANGED
@@ -1,6 +1,6 @@
 from .fixed_variable import HWConfig
-from .fixed_variable_array import FixedVariableArray
+from .fixed_variable_array import FixedVariableArray, FixedVariableArrayInput
 from .pipeline import to_pipeline
 from .tracer import comb_trace
 
-__all__ = ['to_pipeline', 'comb_trace', 'FixedVariableArray', 'HWConfig']
+__all__ = ['to_pipeline', 'comb_trace', 'FixedVariableArray', 'HWConfig', 'FixedVariableArrayInput']