mct-nightly 2.0.0.20240411.406__py3-none-any.whl → 2.0.0.20240413.406__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {mct_nightly-2.0.0.20240411.406.dist-info → mct_nightly-2.0.0.20240413.406.dist-info}/METADATA +2 -2
  2. {mct_nightly-2.0.0.20240411.406.dist-info → mct_nightly-2.0.0.20240413.406.dist-info}/RECORD +30 -21
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/constants.py +4 -0
  5. model_compression_toolkit/core/common/graph/base_graph.py +3 -2
  6. model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +6 -1
  7. model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +5 -0
  8. model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +5 -0
  9. model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +29 -11
  10. model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +1 -1
  11. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +1 -1
  12. model_compression_toolkit/gptq/keras/quantization_facade.py +6 -1
  13. model_compression_toolkit/gptq/pytorch/quantization_facade.py +6 -1
  14. model_compression_toolkit/metadata.py +29 -0
  15. model_compression_toolkit/ptq/keras/quantization_facade.py +6 -2
  16. model_compression_toolkit/ptq/pytorch/quantization_facade.py +6 -1
  17. model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +4 -1
  18. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +1 -0
  19. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +12 -2 (see the usage sketch after this list)
  20. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +16 -0
  21. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +210 -0
  22. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +129 -0
  23. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +111 -0
  24. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +16 -0
  25. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +207 -0
  26. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +129 -0
  27. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +110 -0
  28. {mct_nightly-2.0.0.20240411.406.dist-info → mct_nightly-2.0.0.20240413.406.dist-info}/LICENSE.md +0 -0
  29. {mct_nightly-2.0.0.20240411.406.dist-info → mct_nightly-2.0.0.20240413.406.dist-info}/WHEEL +0 -0
  30. {mct_nightly-2.0.0.20240411.406.dist-info → mct_nightly-2.0.0.20240413.406.dist-info}/top_level.txt +0 -0
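
Items 19-27 above introduce a second generation of the IMX500 target platform capabilities ("v2") together with a LUT-quantization variant ("v2_lut") for both Keras and PyTorch. The following is a minimal, hedged usage sketch only; it assumes the updated imx500_tpc/target_platform_capabilities.py (item 19) registers the new packages under the version strings 'v2' and 'v2_lut', which this diff does not show.

import model_compression_toolkit as mct

# Hedged sketch: fetch the new IMX500 TPC generations through MCT's facade.
# The version strings 'v2' and 'v2_lut' are assumed to match the new package names.
keras_tpc_v2 = mct.get_target_platform_capabilities('tensorflow', 'imx500',
                                                    target_platform_version='v2')
pytorch_tpc_v2_lut = mct.get_target_platform_capabilities('pytorch', 'imx500',
                                                          target_platform_version='v2_lut')

If the registered version strings differ, the per-framework get_keras_tpc() / get_pytorch_tpc() helpers shown in the hunks below can be imported and called directly instead.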
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py
@@ -0,0 +1,129 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+import tensorflow as tf
+from packaging import version
+
+from model_compression_toolkit.defaultdict import DefaultDict
+from model_compression_toolkit.constants import FOUND_SONY_CUSTOM_LAYERS
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS_ATTR, \
+    KERAS_DEPTHWISE_KERNEL, BIAS
+
+if FOUND_SONY_CUSTOM_LAYERS:
+    from sony_custom_layers.keras.object_detection.ssd_post_process import SSDPostProcess
+
+if version.parse(tf.__version__) >= version.parse("2.13"):
+    from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
+        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
+        Conv2DTranspose
+else:
+    from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
+        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
+        Conv2DTranspose
+
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tp_model import get_tp_model
+import model_compression_toolkit as mct
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut import __version__ as TPC_VERSION
+
+tp = mct.target_platform
+
+
+def get_keras_tpc() -> tp.TargetPlatformCapabilities:
+    """
+    get a Keras TargetPlatformCapabilities object with default operation sets to layers mapping.
+    Returns: a Keras TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+    imx500_tpc_tp_model = get_tp_model()
+    return generate_keras_tpc(name='imx500_tpc_keras_tpc', tp_model=imx500_tpc_tp_model)
+
+
+def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
+    """
+    Generates a TargetPlatformCapabilities object with default operation sets to layers mapping.
+
+    Args:
+        name: Name of the TargetPlatformCapabilities.
+        tp_model: TargetPlatformModel object.
+
+    Returns: a TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+
+    keras_tpc = tp.TargetPlatformCapabilities(tp_model, name=name, version=TPC_VERSION)
+
+    no_quant_list = [Reshape,
+                     tf.reshape,
+                     Permute,
+                     tf.transpose,
+                     Flatten,
+                     Cropping2D,
+                     ZeroPadding2D,
+                     Dropout,
+                     MaxPooling2D,
+                     tf.split,
+                     tf.quantization.fake_quant_with_min_max_vars,
+                     tf.math.argmax,
+                     tf.shape,
+                     tf.math.equal,
+                     tf.gather,
+                     tf.cast,
+                     tf.unstack,
+                     tf.compat.v1.gather,
+                     tf.nn.top_k,
+                     tf.__operators__.getitem,
+                     tf.image.combined_non_max_suppression,
+                     tf.compat.v1.shape]
+
+    if FOUND_SONY_CUSTOM_LAYERS:
+        no_quant_list.append(SSDPostProcess)
+
+    with keras_tpc:
+        tp.OperationsSetToLayers("NoQuantization", no_quant_list)
+
+        tp.OperationsSetToLayers("Conv",
+                                 [Conv2D,
+                                  DepthwiseConv2D,
+                                  Conv2DTranspose,
+                                  tf.nn.conv2d,
+                                  tf.nn.depthwise_conv2d,
+                                  tf.nn.conv2d_transpose],
+                                 # we provide attributes mapping that maps each layer type in the operations set
+                                 # that has weights attributes with provided quantization config (in the tp model) to
+                                 # its framework-specific attribute name.
+                                 # note that a DefaultDict should be provided if not all the layer types in the
+                                 # operation set are provided separately in the mapping.
+                                 attr_mapping={
+                                     KERNEL_ATTR: DefaultDict({
+                                         DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
+                                         tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
+                                     BIAS_ATTR: DefaultDict(default_value=BIAS)})
+        tp.OperationsSetToLayers("FullyConnected", [Dense],
+                                 attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
+                                               BIAS_ATTR: DefaultDict(default_value=BIAS)})
+        tp.OperationsSetToLayers("AnyReLU", [tf.nn.relu,
+                                             tf.nn.relu6,
+                                             tf.nn.leaky_relu,
+                                             ReLU,
+                                             LeakyReLU,
+                                             tp.LayerFilterParams(Activation, activation="relu"),
+                                             tp.LayerFilterParams(Activation, activation="leaky_relu")])
+        tp.OperationsSetToLayers("Add", [tf.add, Add])
+        tp.OperationsSetToLayers("Sub", [tf.subtract, Subtract])
+        tp.OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
+        tp.OperationsSetToLayers("Div", [tf.math.divide])
+        tp.OperationsSetToLayers("PReLU", [PReLU])
+        tp.OperationsSetToLayers("Swish", [tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")])
+        tp.OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
+        tp.OperationsSetToLayers("Tanh", [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
+
+    return keras_tpc
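
The attr_mapping comment in the "Conv" operations set above is the key detail of this file: layer types that use a framework-specific weight attribute name are listed explicitly, and every other member of the operations set falls back to the DefaultDict default. Below is a small self-contained sketch of that fallback behaviour, assuming MCT's DefaultDict exposes a get() lookup (the constructor signature is taken from the hunk above; the stand-in classes and the printed values are illustrative assumptions, not part of the package).

from model_compression_toolkit.defaultdict import DefaultDict
from model_compression_toolkit.target_platform_capabilities.constants import KERAS_KERNEL, KERAS_DEPTHWISE_KERNEL

class Conv2D: ...            # stand-ins so the sketch does not require TensorFlow
class DepthwiseConv2D: ...

# Same pattern as the "Conv" operations set above: depthwise layers map to the
# depthwise kernel attribute, everything else falls back to the regular kernel.
kernel_attr_mapping = DefaultDict({DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL},
                                  default_value=KERAS_KERNEL)

# Assumed lookup API: get(key) returns the mapped value for known keys,
# and the default for every other key.
print(kernel_attr_mapping.get(DepthwiseConv2D))  # expected: 'depthwise_kernel'
print(kernel_attr_mapping.get(Conv2D))           # expected: 'kernel' (default fallback)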
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py
@@ -0,0 +1,110 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import operator
+
+import torch
+from torch import add, sub, mul, div, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, chunk, unbind, topk, \
+    gather, equal, transpose, permute, argmax, squeeze
+from torch.nn import Conv2d, Linear, BatchNorm2d, ConvTranspose2d
+from torch.nn import Dropout, Flatten, Hardtanh
+from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, LeakyReLU
+from torch.nn.functional import relu, relu6, prelu, silu, hardtanh, hardswish, leaky_relu
+
+from model_compression_toolkit.defaultdict import DefaultDict
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR, \
+    BIAS
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tp_model import get_tp_model
+import model_compression_toolkit as mct
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut import __version__ as TPC_VERSION
+
+tp = mct.target_platform
+
+
+def get_pytorch_tpc() -> tp.TargetPlatformCapabilities:
+    """
+    get a Pytorch TargetPlatformCapabilities object with default operation sets to layers mapping.
+    Returns: a Pytorch TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+    imx500_tpc_tp_model = get_tp_model()
+    return generate_pytorch_tpc(name='imx500_tpc_pytorch_tpc', tp_model=imx500_tpc_tp_model)
+
+
+def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
+    """
+    Generates a TargetPlatformCapabilities object with default operation sets to layers mapping.
+    Args:
+        name: Name of the TargetPlatformModel.
+        tp_model: TargetPlatformModel object.
+    Returns: a TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+
+    pytorch_tpc = tp.TargetPlatformCapabilities(tp_model,
+                                                name=name,
+                                                version=TPC_VERSION)
+
+    # we provide attributes mapping that maps each layer type in the operations set
+    # that has weights attributes with provided quantization config (in the tp model) to
+    # its framework-specific attribute name.
+    # note that a DefaultDict should be provided if not all the layer types in the
+    # operation set are provided separately in the mapping.
+    pytorch_linear_attr_mapping = {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
+                                   BIAS_ATTR: DefaultDict(default_value=BIAS)}
+
+    with pytorch_tpc:
+        tp.OperationsSetToLayers("NoQuantization", [Dropout,
+                                                    Flatten,
+                                                    dropout,
+                                                    flatten,
+                                                    split,
+                                                    operator.getitem,
+                                                    reshape,
+                                                    unsqueeze,
+                                                    BatchNorm2d,
+                                                    chunk,
+                                                    unbind,
+                                                    torch.Tensor.size,
+                                                    permute,
+                                                    transpose,
+                                                    equal,
+                                                    argmax,
+                                                    gather,
+                                                    topk,
+                                                    squeeze])
+
+        tp.OperationsSetToLayers("Conv", [Conv2d, ConvTranspose2d],
+                                 attr_mapping=pytorch_linear_attr_mapping)
+        tp.OperationsSetToLayers("FullyConnected", [Linear],
+                                 attr_mapping=pytorch_linear_attr_mapping)
+        tp.OperationsSetToLayers("AnyReLU", [torch.relu,
+                                             ReLU,
+                                             ReLU6,
+                                             LeakyReLU,
+                                             relu,
+                                             relu6,
+                                             leaky_relu,
+                                             tp.LayerFilterParams(Hardtanh, min_val=0),
+                                             tp.LayerFilterParams(hardtanh, min_val=0)])
+
+        tp.OperationsSetToLayers("Add", [operator.add, add])
+        tp.OperationsSetToLayers("Sub", [operator.sub, sub])
+        tp.OperationsSetToLayers("Mul", [operator.mul, mul])
+        tp.OperationsSetToLayers("Div", [operator.truediv, div])
+        tp.OperationsSetToLayers("PReLU", [PReLU, prelu])
+        tp.OperationsSetToLayers("Swish", [SiLU, silu, Hardswish, hardswish])
+        tp.OperationsSetToLayers("Sigmoid", [Sigmoid, sigmoid])
+        tp.OperationsSetToLayers("Tanh", [Tanh, tanh])
+
+    return pytorch_tpc
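
For completeness, a hedged end-to-end sketch of how the new v2_lut PyTorch capabilities might be consumed. It assumes mct.ptq.pytorch_post_training_quantization (whose facade is touched in item 16 above) accepts the capabilities object through its target_platform_capabilities argument; the toy model and representative dataset below are placeholders, not part of this release.

import numpy as np
import torch
import model_compression_toolkit as mct
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tpc_pytorch import \
    get_pytorch_tpc

# Placeholder float model; replace with a real trained network.
float_model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, kernel_size=3), torch.nn.ReLU())

def representative_data_gen():
    # A generator yielding lists of input batches, one entry per model input.
    for _ in range(2):
        yield [np.random.randn(1, 3, 32, 32).astype(np.float32)]

tpc = get_pytorch_tpc()  # the v2_lut IMX500 capabilities added in this release

# Assumed facade signature; see model_compression_toolkit/ptq/pytorch/quantization_facade.py (item 16).
quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model,
    representative_data_gen,
    target_platform_capabilities=tpc)

The Keras path is symmetric: get_keras_tpc() from the first hunk feeds the Keras PTQ facade (item 15) in the same way.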