mct-nightly 2.0.0.20240414.858-py3-none-any.whl → 2.0.0.20240416.403-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/METADATA +1 -1
- {mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/RECORD +11 -11
- model_compression_toolkit/__init__.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +40 -14
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +5 -3
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +4 -4
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +5 -3
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +4 -4
- {mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/LICENSE.md +0 -0
- {mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/WHEEL +0 -0
- {mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/top_level.txt +0 -0
{mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-model_compression_toolkit/__init__.py,sha256=
+model_compression_toolkit/__init__.py,sha256=KTYCxOzzo2m8dPzI8ywsREykhLFg_6-dmbX-KU-IqPM,1573
 model_compression_toolkit/constants.py,sha256=f9at1H_-vb5nvdHRmAHUco4ja4_QermK6yu0N9qbRGE,3723
 model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
 model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567
@@ -109,7 +109,7 @@ model_compression_toolkit/core/common/quantization/quantize_graph_weights.py,sha
 model_compression_toolkit/core/common/quantization/quantize_node.py,sha256=cdzGNWfT4MRogIU8ehs0tr3lVjnzAI-jeoS9b4TwVBo,2854
 model_compression_toolkit/core/common/quantization/set_node_quantization_config.py,sha256=9BEv2l0z2trDEsr40VB8tO3ToBA_b2sd_jH9uqZ5Wo8,11503
 model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py,sha256=eCDGwsWYLU6z7qbEVb4TozMW_nd5VEP_iCJ6PcvyEPw,1486
-model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py,sha256=
+model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py,sha256=TUJuSpX8pcsIPbJ6z_YGWgD_uafqlKRJcpsTIFpjMKU,19936
 model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py,sha256=HSbAlDKXZMn8BtQQGL8TnlXvO2f_2oTLXAK1khraX7g,7410
 model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py,sha256=9gnfJV89jpGwAx8ImJ5E9NjCv3lDtbyulP4OtgWb62M,1772
 model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py,sha256=BiwDqt5CeU6CW0Qusy3LwWhFtf2J9BvSuGMsTsG6rSw,8538
@@ -444,12 +444,12 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py,sha256=X853xDEF-3rcPoqxbrlYN28vvW3buSdM36c_eN_LKx8,5758
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py,sha256=vKWAoQ2KkhuptS5HZB50zHG6KY8wHpHTxPugw_nGCRo,717
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py,sha256=8FZjOCaQRwrQLbtmzNrrRj2-VyZMUGzsIWKIDpGVEoQ,10947
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py,sha256=
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py,sha256=
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py,sha256=QVIOc_DrFHBb81q3N8Fmx5GkOBviWsulxEwHpsyFik4,6570
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py,sha256=jAyTXhcChO124odtWC3bYKRH4ZyqLPkKQluJFOoyPIM,5726
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py,sha256=wUk4Xsg7jpxOWYjq2K3WUwLcI185p_sVPK-ttG0ydhA,721
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py,sha256=T6Hp_Rk15SAz22g_SWDyHJecBpBAjxKt3ezuVEYf4LE,10680
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py,sha256=
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py,sha256=
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py,sha256=n9HA61-bFm8g0rals9aTvH7i09EU4B788nymFofLwkw,6578
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py,sha256=dFQjzFlLDwoUqKNP1at1fS1N1WJadSSasRyzHl6vaB8,5733
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py,sha256=lnhJcwvTF0t7ybeiTleIS1p0aD8xzFZxVPx4ISk5uWQ,2090
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py,sha256=UUvUCcTots_sehdRnDfgkaE8WPQ7dPbeuhDF4Qy2nzw,1510
@@ -480,8 +480,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=7bbzqJN8ZAycVDvZr_5xC-niTAR5df8f03Kooev_pfg,3047
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
+mct_nightly-2.0.0.20240416.403.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-2.0.0.20240416.403.dist-info/METADATA,sha256=q5Rcs-EVPfnHC_czeMd1EmA5PcrczG399SifWAKW644,18795
+mct_nightly-2.0.0.20240416.403.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+mct_nightly-2.0.0.20240416.403.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-2.0.0.20240416.403.dist-info/RECORD,,
model_compression_toolkit/__init__.py
CHANGED
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.0.0.
+__version__ = "2.0.0.20240416.000403"
model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py
CHANGED
@@ -89,8 +89,8 @@ def _lp_error_histogram(q_bins: np.ndarray,
 
 
 def _kl_error_function(x: np.ndarray,
-                       range_min:
-                       range_max:
+                       range_min: np.ndarray,
+                       range_max: np.ndarray,
                        n_bins: int = 2048,
                        n_bits: int = 8) -> np.float32:
     """
@@ -148,7 +148,8 @@ def _kl_error_function_wrapper(x: np.ndarray,
                                range_min: np.ndarray,
                                range_max: np.ndarray,
                                n_bins: int = 2048,
-                               n_bits: int = 8
+                               n_bits: int = 8,
+                               per_channel: int = False) -> np.ndarray:
     """
     Computes the error function between a tensor and its quantized version for each channel.
     The error is based on the KL-divergence between the distributions.
@@ -161,6 +162,7 @@ def _kl_error_function_wrapper(x: np.ndarray,
         range_max: Array specifying the maximum bound of the quantization range for each channel.
         n_bins: Number of bins for the float histogram.
         n_bits: Number of bits used for quantization.
+        per_channel: Whether quantization is done per-channel.
 
     Returns:
         An array containing the KL-divergence between the float and quantized histograms of the tensor for each channel.
@@ -168,8 +170,11 @@ def _kl_error_function_wrapper(x: np.ndarray,
     """
 
     error_list = []
-
-
+    if per_channel:
+        for j in range(x.shape[0]):  # iterate all channels of the tensor.
+            error_list.append(_kl_error_function(x[j], range_min[j], range_max[j], n_bins=n_bins, n_bits=n_bits))
+    else:
+        error_list.append(_kl_error_function(x, range_min, range_max, n_bins=n_bins, n_bits=n_bits))
     return np.asarray(error_list)
 
 
@@ -177,8 +182,8 @@ def _kl_error_histogram(q_bins: np.ndarray,
                         q_count: np.ndarray,
                         bins: np.ndarray,
                         counts: np.ndarray,
-                        range_min:
-                        range_max:
+                        range_min: np.ndarray,
+                        range_max: np.ndarray) -> np.float32:
     """
     Compute the error function between a histogram to its quantized version.
     The error is computed based on the KL-divergence the distributions have.
@@ -241,8 +246,8 @@ def _kl_error_histogram(q_bins: np.ndarray,
 
 
 def _get_bins_indices_from_range(bins: np.ndarray,
-                                 range_min:
-                                 range_max:
+                                 range_min: np.ndarray,
+                                 range_max: np.ndarray) -> Tuple[int, int]:
     """
     For bins and a threshold, compute the first and last bins in between the threshold
     ranges.
@@ -262,7 +267,7 @@ def _get_bins_indices_from_range(bins: np.ndarray,
     return first_bin_idx, last_bin_idx
 
 
-def _is_range_valid(bins: np.ndarray, range_min:
+def _is_range_valid(bins: np.ndarray, range_min: np.ndarray, range_max: np.ndarray) -> bool:
     """
     Check whether there are some bins from a numpy array of bins that are in between
     a threshold range or not.
@@ -387,15 +392,36 @@ def get_threshold_selection_tensor_error_function(quantization_method: Quantizat
 
     Returns: a Callable method that calculates the error between a tensor and a quantized tensor.
     """
+    if quant_error_method == qc.QuantizationErrorMethod.KL:
+        if axis is None:
+            # per-tensor
+            if quantization_method == QuantizationMethod.UNIFORM:
+                return lambda x, y, threshold: _kl_error_function_wrapper(x, range_min=threshold[0],
+                                                                          range_max=threshold[1],
+                                                                          n_bits=n_bits,
+                                                                          per_channel=False)
+            else:
+                return lambda x, y, threshold: _kl_error_function_wrapper(x, range_min=0 if not signed else -threshold,
+                                                                          range_max=threshold,
+                                                                          n_bits=n_bits,
+                                                                          per_channel=False)
+        else:
+            # per-channel
+            if quantization_method == QuantizationMethod.UNIFORM:
+                return lambda x, y, threshold: _kl_error_function_wrapper(x, range_min=threshold[:, 0],
+                                                                          range_max=threshold[:, 1],
+                                                                          n_bits=n_bits,
+                                                                          per_channel=True)
+            else:
+                return lambda x, y, threshold: _kl_error_function_wrapper(x, range_min=0 if not signed else -threshold,
+                                                                          range_max=threshold,
+                                                                          n_bits=n_bits,
+                                                                          per_channel=True)
 
     quant_method_error_function_mapping = {
         qc.QuantizationErrorMethod.MSE: lambda x, y, threshold: compute_mse(x, y, norm=norm, axis=axis),
        qc.QuantizationErrorMethod.MAE: lambda x, y, threshold: compute_mae(x, y, norm=norm, axis=axis),
        qc.QuantizationErrorMethod.LP: lambda x, y, threshold: compute_lp_norm(x, y, p=p, norm=norm, axis=axis),
-        qc.QuantizationErrorMethod.KL:
-        lambda x, y, threshold: _kl_error_function_wrapper(x, range_min=threshold[:,0], range_max=threshold[:,1],
-                                                           n_bits=n_bits) if quantization_method == QuantizationMethod.UNIFORM
-        else _kl_error_function_wrapper(x, range_min=0 if not signed else -threshold, range_max=threshold, n_bits=n_bits)
     }
 
     return quant_method_error_function_mapping[quant_error_method]
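Taken together, the error_functions.py changes make the KL-based threshold search explicitly per-channel aware: _kl_error_function_wrapper gains a per_channel flag and, when it is set, iterates axis 0 of the tensor and pairs each channel with its own range_min[j]/range_max[j] bounds, while get_threshold_selection_tensor_error_function now chooses the per-tensor or per-channel path from axis and unpacks the threshold accordingly (per-channel (min, max) pairs for UNIFORM, otherwise [0, t] or [-t, t] depending on signedness). The standalone NumPy sketch below mirrors only that dispatch; the stand-in error metric and helper names are illustrative, not the toolkit's implementation.

import numpy as np

def _toy_range_error(x, r_min, r_max, n_bits=8):
    # Stand-in for MCT's histogram/KL-based _kl_error_function: uniformly
    # quantize x into [r_min, r_max] and return the mean squared error.
    levels = 2 ** n_bits - 1
    step = (r_max - r_min) / levels
    q = np.clip(np.round((x - r_min) / step), 0, levels) * step + r_min
    return float(np.mean((x - q) ** 2))

def error_wrapper(x, range_min, range_max, n_bits=8, per_channel=False):
    # Mirrors the dispatch added in this diff: per-channel iterates axis 0 and
    # uses that channel's own bounds; per-tensor computes a single error.
    errors = []
    if per_channel:
        for j in range(x.shape[0]):
            errors.append(_toy_range_error(x[j], range_min[j], range_max[j], n_bits))
    else:
        errors.append(_toy_range_error(x, range_min, range_max, n_bits))
    return np.asarray(errors)

x = np.random.randn(4, 32).astype(np.float32)
print(error_wrapper(x, x.min(axis=1), x.max(axis=1), per_channel=True).shape)  # (4,)
print(error_wrapper(x, x.min(), x.max(), per_channel=False).shape)             # (1,)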
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py
CHANGED
@@ -26,11 +26,11 @@ if FOUND_SONY_CUSTOM_LAYERS:
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose
+        Conv2DTranspose, Identity
 else:
     from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose
+        Conv2DTranspose, Identity
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2.tp_model import get_tp_model
 import model_compression_toolkit as mct
@@ -62,7 +62,9 @@ def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
 
     keras_tpc = tp.TargetPlatformCapabilities(tp_model, name=name, version=TPC_VERSION)
 
-    no_quant_list = [
+    no_quant_list = [Identity,
+                     tf.identity,
+                     Reshape,
                      tf.reshape,
                      Permute,
                      tf.transpose,
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py
CHANGED
@@ -18,8 +18,8 @@ import operator
 import torch
 from torch import add, sub, mul, div, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, chunk, unbind, topk, \
     gather, equal, transpose, permute, argmax, squeeze
-from torch.nn import Conv2d, Linear,
-from torch.nn import Dropout, Flatten, Hardtanh
+from torch.nn import Conv2d, Linear, ConvTranspose2d
+from torch.nn import Dropout, Flatten, Hardtanh, Identity
 from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, LeakyReLU
 from torch.nn.functional import relu, relu6, prelu, silu, hardtanh, hardswish, leaky_relu
 
@@ -65,7 +65,8 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                    BIAS_ATTR: DefaultDict(default_value=BIAS)}
 
     with pytorch_tpc:
-        tp.OperationsSetToLayers("NoQuantization", [
+        tp.OperationsSetToLayers("NoQuantization", [Identity,
+                                                    Dropout,
                                                     Flatten,
                                                     dropout,
                                                     flatten,
@@ -73,7 +74,6 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                                                     operator.getitem,
                                                     reshape,
                                                     unsqueeze,
-                                                    BatchNorm2d,
                                                     chunk,
                                                     unbind,
                                                     torch.Tensor.size,
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py
CHANGED
@@ -26,11 +26,11 @@ if FOUND_SONY_CUSTOM_LAYERS:
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose
+        Conv2DTranspose, Identity
 else:
     from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose
+        Conv2DTranspose, Identity
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tp_model import get_tp_model
 import model_compression_toolkit as mct
@@ -61,7 +61,9 @@ def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
 
     keras_tpc = tp.TargetPlatformCapabilities(tp_model, name=name, version=TPC_VERSION)
 
-    no_quant_list = [
+    no_quant_list = [Identity,
+                     tf.identity,
+                     Reshape,
                      tf.reshape,
                      Permute,
                      tf.transpose,
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py
CHANGED
@@ -18,8 +18,8 @@ import operator
 import torch
 from torch import add, sub, mul, div, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, chunk, unbind, topk, \
     gather, equal, transpose, permute, argmax, squeeze
-from torch.nn import Conv2d, Linear,
-from torch.nn import Dropout, Flatten, Hardtanh
+from torch.nn import Conv2d, Linear, ConvTranspose2d
+from torch.nn import Dropout, Flatten, Hardtanh, Identity
 from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, LeakyReLU
 from torch.nn.functional import relu, relu6, prelu, silu, hardtanh, hardswish, leaky_relu
 
@@ -64,7 +64,8 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                    BIAS_ATTR: DefaultDict(default_value=BIAS)}
 
     with pytorch_tpc:
-        tp.OperationsSetToLayers("NoQuantization", [
+        tp.OperationsSetToLayers("NoQuantization", [Identity,
+                                                    Dropout,
                                                     Flatten,
                                                     dropout,
                                                     flatten,
@@ -72,7 +73,6 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                                                     operator.getitem,
                                                     reshape,
                                                     unsqueeze,
-                                                    BatchNorm2d,
                                                     chunk,
                                                     unbind,
                                                     torch.Tensor.size,
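Across the four TPC files above (imx500 v2 and v2_lut, Keras and PyTorch), identity ops (the Keras Identity layer and tf.identity, and torch.nn.Identity) are added to the "NoQuantization" operator set, and BatchNorm2d is removed from the PyTorch lists, so identity layers in models attached to these TPCs now pass through without quantization. A rough usage sketch follows; it assumes MCT 2.0's public helpers get_target_platform_capabilities and mct.ptq.pytorch_post_training_quantization keep the names, arguments, and return values shown here, which is an assumption rather than something this diff confirms.

import numpy as np
import torch.nn as nn
import model_compression_toolkit as mct

# Toy model with an Identity module; under the updated v2 TPC it should be
# mapped to the "NoQuantization" operator set (assumption based on this diff).
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Identity(), nn.ReLU())

def representative_data_gen():
    for _ in range(2):
        yield [np.random.randn(1, 3, 32, 32).astype(np.float32)]

# Hypothetical invocation; helper and argument names follow MCT 2.0 docs as
# recalled, not this diff.
tpc = mct.get_target_platform_capabilities("pytorch", "imx500", target_platform_version="v2")
quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    model, representative_data_gen, target_platform_capabilities=tpc)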
{mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/LICENSE.md
RENAMED
File without changes
{mct_nightly-2.0.0.20240414.858.dist-info → mct_nightly-2.0.0.20240416.403.dist-info}/top_level.txt
RENAMED
File without changes