mct-nightly 2.2.0.20250106.546__py3-none-any.whl → 2.2.0.20250107.15510__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. {mct_nightly-2.2.0.20250106.546.dist-info → mct_nightly-2.2.0.20250107.15510.dist-info}/METADATA +1 -1
  2. {mct_nightly-2.2.0.20250106.546.dist-info → mct_nightly-2.2.0.20250107.15510.dist-info}/RECORD +43 -78
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/core/__init__.py +1 -1
  5. model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +1 -1
  6. model_compression_toolkit/core/common/graph/memory_graph/cut.py +5 -2
  7. model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +25 -25
  8. model_compression_toolkit/core/common/quantization/quantization_config.py +19 -1
  9. model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +1 -33
  10. model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +2 -2
  11. model_compression_toolkit/core/keras/resource_utilization_data_facade.py +11 -1
  12. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/matmul_decomposition.py +499 -0
  13. model_compression_toolkit/core/pytorch/pytorch_implementation.py +3 -0
  14. model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +11 -3
  15. model_compression_toolkit/gptq/keras/quantization_facade.py +10 -1
  16. model_compression_toolkit/gptq/pytorch/quantization_facade.py +10 -1
  17. model_compression_toolkit/pruning/keras/pruning_facade.py +8 -2
  18. model_compression_toolkit/pruning/pytorch/pruning_facade.py +8 -2
  19. model_compression_toolkit/ptq/keras/quantization_facade.py +10 -1
  20. model_compression_toolkit/ptq/pytorch/quantization_facade.py +9 -1
  21. model_compression_toolkit/qat/__init__.py +5 -2
  22. model_compression_toolkit/qat/keras/quantization_facade.py +9 -1
  23. model_compression_toolkit/qat/pytorch/quantization_facade.py +9 -1
  24. model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py +1 -1
  25. model_compression_toolkit/target_platform_capabilities/schema/v1.py +63 -55
  26. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py +29 -18
  27. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2keras.py +78 -57
  28. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2pytorch.py +69 -54
  29. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +2 -4
  30. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -10
  31. model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py +93 -0
  32. model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +46 -28
  33. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +6 -5
  34. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +51 -19
  35. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +8 -4
  36. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +19 -9
  37. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +7 -4
  38. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +46 -32
  39. model_compression_toolkit/xquant/keras/keras_report_utils.py +11 -3
  40. model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +10 -2
  41. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -98
  42. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -129
  43. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -108
  44. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -16
  45. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -217
  46. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -130
  47. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -109
  48. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -16
  49. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -215
  50. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -130
  51. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -110
  52. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -16
  53. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -222
  54. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -132
  55. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -110
  56. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -16
  57. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -219
  58. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -132
  59. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -109
  60. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -16
  61. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -246
  62. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -135
  63. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -113
  64. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -16
  65. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -230
  66. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -132
  67. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -110
  68. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -16
  69. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -332
  70. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -140
  71. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -122
  72. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -55
  73. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -89
  74. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -78
  75. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -55
  76. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -118
  77. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -100
  78. {mct_nightly-2.2.0.20250106.546.dist-info → mct_nightly-2.2.0.20250107.15510.dist-info}/LICENSE.md +0 -0
  79. {mct_nightly-2.2.0.20250106.546.dist-info → mct_nightly-2.2.0.20250107.15510.dist-info}/WHEEL +0 -0
  80. {mct_nightly-2.2.0.20250106.546.dist-info → mct_nightly-2.2.0.20250107.15510.dist-info}/top_level.txt +0 -0
model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
@@ -18,8 +18,7 @@ import model_compression_toolkit as mct
  import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
  from model_compression_toolkit.constants import FLOAT_BITWIDTH
  from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TP_MODEL
- from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
- Signedness, \
+ from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, Signedness, \
  AttributeQuantizationConfig, OpQuantizationConfig

  tp = mct.target_platform
@@ -137,8 +136,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # of possible configurations to consider when quantizing a set of operations (in mixed-precision, for example).
  # If the QuantizationConfigOptions contains only one configuration,
  # this configuration will be used for the operation quantization:
- default_configuration_options = schema.QuantizationConfigOptions(
- quantization_configurations=tuple([default_config]))
+ default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([default_config]))

  # In TFLite, the quantized operator specifications constraint operators quantization
  # differently. For more details:
@@ -146,44 +144,60 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  operator_set = []
  fusing_patterns = []

- operator_set.append(schema.OperatorsSet(name="NoQuantization",
- qc_options=default_configuration_options.clone_and_edit(
- quantization_preserving=True)))
-
- fc = schema.OperatorsSet(name="FullyConnected",
- qc_options=default_configuration_options.clone_and_edit_weight_attribute(
- weights_per_channel_threshold=False))
-
- operator_set.append(schema.OperatorsSet(name="L2Normalization",
+ quant_preserving = default_configuration_options.clone_and_edit(quantization_preserving=True)
+
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.UNSTACK, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.TRANSPOSE, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.GATHER, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.RESHAPE, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.MAXPOOL, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.AVGPOOL, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.STRIDED_SLICE, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.CONCATENATE, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.MUL, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.MIN, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.MAX, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.ZERO_PADDING2D, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.RESIZE, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.PAD, qc_options=quant_preserving))
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.FOLD, qc_options=quant_preserving))
+
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.L2NORM,
  qc_options=default_configuration_options.clone_and_edit(
  fixed_zero_point=0, fixed_scale=1 / 128)))
- operator_set.append(schema.OperatorsSet(name="LogSoftmax",
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.LOG_SOFTMAX,
  qc_options=default_configuration_options.clone_and_edit(
  fixed_zero_point=127, fixed_scale=16 / 256)))
- operator_set.append(schema.OperatorsSet(name="Tanh",
- qc_options=default_configuration_options.clone_and_edit(
- fixed_zero_point=0, fixed_scale=1 / 128)))
- operator_set.append(schema.OperatorsSet(name="Softmax",
- qc_options=default_configuration_options.clone_and_edit(
- fixed_zero_point=-128, fixed_scale=1 / 256)))
- operator_set.append(schema.OperatorsSet(name="Logistic",
+ operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.SOFTMAX,
  qc_options=default_configuration_options.clone_and_edit(
  fixed_zero_point=-128, fixed_scale=1 / 256)))

- conv2d = schema.OperatorsSet(name="Conv2d")
- kernel = schema.OperatorSetConcat(operators_set=[conv2d, fc])
+ sigmoid = schema.OperatorsSet(name=schema.OperatorSetNames.SIGMOID,
+ qc_options=default_configuration_options.clone_and_edit_weight_attribute(
+ weights_per_channel_threshold=False))
+ tanh = schema.OperatorsSet(name=schema.OperatorSetNames.TANH,
+ qc_options=default_configuration_options.clone_and_edit(
+ fixed_zero_point=-128, fixed_scale=1 / 256))
+ fc = schema.OperatorsSet(name=schema.OperatorSetNames.FULLY_CONNECTED,
+ qc_options=default_configuration_options.clone_and_edit_weight_attribute(
+ weights_per_channel_threshold=False))
+ squeeze = schema.OperatorsSet(name=schema.OperatorSetNames.SQUEEZE,
+ qc_options=default_configuration_options.clone_and_edit(
+ quantization_preserving=True))
+
+ conv2d = schema.OperatorsSet(name=schema.OperatorSetNames.CONV)
+ relu = schema.OperatorsSet(name=schema.OperatorSetNames.RELU)
+ relu6 = schema.OperatorsSet(name=schema.OperatorSetNames.RELU6)
+ elu = schema.OperatorsSet(name=schema.OperatorSetNames.ELU)
+ batch_norm = schema.OperatorsSet(name=schema.OperatorSetNames.BATCH_NORM)
+ add = schema.OperatorsSet(name=schema.OperatorSetNames.ADD)
+ bias_add = schema.OperatorsSet(name=schema.OperatorSetNames.ADD_BIAS)

- relu = schema.OperatorsSet(name="Relu")
- elu = schema.OperatorsSet(name="Elu")
+ kernel = schema.OperatorSetConcat(operators_set=[conv2d, fc])
  activations_to_fuse = schema.OperatorSetConcat(operators_set=[relu, elu])

- batch_norm = schema.OperatorsSet(name="BatchNorm")
- bias_add = schema.OperatorsSet(name="BiasAdd")
- add = schema.OperatorsSet(name="Add")
- squeeze = schema.OperatorsSet(name="Squeeze",
- qc_options=default_configuration_options.clone_and_edit(
- quantization_preserving=True))
- operator_set.extend([fc, conv2d, relu, elu, batch_norm, bias_add, add, squeeze])
+ operator_set.extend([fc, conv2d, relu, relu6, tanh, sigmoid, batch_norm, add, bias_add, elu, squeeze])
+
  # ------------------- #
  # Fusions
  # ------------------- #
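Note: the hunk above replaces string-named operator sets with schema.OperatorSetNames members and builds one shared quantization-preserving options object instead of a single "NoQuantization" set. A minimal sketch of that pattern, assuming a caller-supplied default_config as in generate_tp_model's signature; the helper name build_quant_preserving_ops is hypothetical:

    import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
    from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig

    def build_quant_preserving_ops(default_config: OpQuantizationConfig):
        # Single-configuration options object, as in the hunk above.
        default_configuration_options = schema.QuantizationConfigOptions(
            quantization_configurations=tuple([default_config]))

        # One shared quantization-preserving variant, reused by every shape/data-movement op.
        quant_preserving = default_configuration_options.clone_and_edit(quantization_preserving=True)

        return [schema.OperatorsSet(name=schema.OperatorSetNames.RESHAPE, qc_options=quant_preserving),
                schema.OperatorsSet(name=schema.OperatorSetNames.TRANSPOSE, qc_options=quant_preserving),
                schema.OperatorsSet(name=schema.OperatorSetNames.MAXPOOL, qc_options=quant_preserving)]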
model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -13,11 +13,11 @@
  # limitations under the License.
  # ==============================================================================

-
+ from model_compression_toolkit import get_target_platform_capabilities
+ from model_compression_toolkit.constants import TENSORFLOW
  from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
  from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
  from model_compression_toolkit.xquant.common.framework_report_utils import FrameworkReportUtils
- from model_compression_toolkit.ptq.keras.quantization_facade import DEFAULT_KERAS_TPC
  from model_compression_toolkit.xquant.common.model_folding_utils import ModelFoldingUtils
  from model_compression_toolkit.xquant.common.similarity_calculator import SimilarityCalculator
  from model_compression_toolkit.xquant.keras.dataset_utils import KerasDatasetUtils
@@ -26,6 +26,9 @@ from model_compression_toolkit.xquant.keras.model_analyzer import KerasModelAnal
  from model_compression_toolkit.xquant.keras.similarity_functions import KerasSimilarityFunctions
  from model_compression_toolkit.xquant.keras.tensorboard_utils import KerasTensorboardUtils
  from mct_quantizers.keras.metadata import get_metadata
+ from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+ from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+ AttachTpcToKeras


  class KerasReportUtils(FrameworkReportUtils):
@@ -40,10 +43,15 @@ class KerasReportUtils(FrameworkReportUtils):
  fw_info = DEFAULT_KERAS_INFO
  fw_impl = KerasImplementation()

+ # Set the default Target Platform Capabilities (TPC) for Keras.
+ default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+ attach2pytorch = AttachTpcToKeras()
+ target_platform_capabilities = attach2pytorch.attach(default_tpc)
+
  dataset_utils = KerasDatasetUtils()
  model_folding = ModelFoldingUtils(fw_info=fw_info,
  fw_impl=fw_impl,
- fw_default_tpc=DEFAULT_KERAS_TPC)
+ fw_default_tpc=target_platform_capabilities)

  similarity_calculator = SimilarityCalculator(dataset_utils=dataset_utils,
  model_folding=model_folding,
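Note: a minimal sketch of the lookup-and-attach flow that replaces the removed DEFAULT_KERAS_TPC import, using only names introduced in the hunks above (the attacher variable, called attach2pytorch in the released Keras code, is renamed here for clarity):

    from model_compression_toolkit import get_target_platform_capabilities
    from model_compression_toolkit.constants import TENSORFLOW
    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
        AttachTpcToKeras

    # Resolve the default TPC for TensorFlow/Keras, then attach the framework layer mappings to it.
    default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
    target_platform_capabilities = AttachTpcToKeras().attach(default_tpc)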
model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
@@ -12,9 +12,13 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
+ from model_compression_toolkit import get_target_platform_capabilities
+ from model_compression_toolkit.constants import PYTORCH
  from model_compression_toolkit.core.pytorch.utils import get_working_device
+ from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+ from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+ AttachTpcToPytorch

- from model_compression_toolkit.ptq.pytorch.quantization_facade import DEFAULT_PYTORCH_TPC
  from model_compression_toolkit.xquant.common.framework_report_utils import FrameworkReportUtils
  from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
  from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
@@ -37,11 +41,15 @@ class PytorchReportUtils(FrameworkReportUtils):
  """
  fw_info = DEFAULT_PYTORCH_INFO
  fw_impl = PytorchImplementation()
+ # Set the default Target Platform Capabilities (TPC) for PyTorch.
+ default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+ attach2pytorch = AttachTpcToPytorch()
+ target_platform_capabilities = attach2pytorch.attach(default_tpc)

  dataset_utils = PytorchDatasetUtils()
  model_folding = ModelFoldingUtils(fw_info=fw_info,
  fw_impl=fw_impl,
- fw_default_tpc=DEFAULT_PYTORCH_TPC)
+ fw_default_tpc=target_platform_capabilities)

  similarity_calculator = SimilarityCalculator(dataset_utils=dataset_utils,
  model_folding=model_folding,
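Note: the PyTorch report utils mirror the Keras change; a sketch under the same assumptions, using the imports shown in this file's hunks:

    from model_compression_toolkit import get_target_platform_capabilities
    from model_compression_toolkit.constants import PYTORCH
    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
        AttachTpcToPytorch

    # Same two-step flow as in keras_report_utils.py, with the PyTorch attacher.
    default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
    target_platform_capabilities = AttachTpcToPytorch().attach(default_tpc)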
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py (deleted)
@@ -1,98 +0,0 @@
- # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
- from model_compression_toolkit.logger import Logger
-
- from model_compression_toolkit.constants import TENSORFLOW, PYTORCH
- from model_compression_toolkit.verify_packages import FOUND_TORCH, FOUND_TF
- from model_compression_toolkit.target_platform_capabilities.constants import LATEST
-
-
- def get_tpc_dict_by_fw(fw_name):
- tpc_models_dict = None
- if fw_name == TENSORFLOW:
- ###############################
- # Build Tensorflow TPC models
- ###############################
- if FOUND_TF:
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
- get_keras_tpc_latest
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v1
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1_lut.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v1_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1_pot.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v1_pot
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v2
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v2_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v3
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v3_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_keras import \
- get_keras_tpc as get_keras_tpc_v4
-
- # Keras: TPC versioning
- tpc_models_dict = {'v1': get_keras_tpc_v1,
- 'v1_lut': get_keras_tpc_v1_lut,
- 'v1_pot': get_keras_tpc_v1_pot,
- 'v2': get_keras_tpc_v2,
- 'v2_lut': get_keras_tpc_v2_lut,
- 'v3': get_keras_tpc_v3,
- 'v3_lut': get_keras_tpc_v3_lut,
- 'v4': get_keras_tpc_v4,
- LATEST: get_keras_tpc_latest}
- elif fw_name == PYTORCH:
- ###############################
- # Build Pytorch TPC models
- ###############################
- if FOUND_TORCH:
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
- get_pytorch_tpc_latest
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v1
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1_pot.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v1_pot
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1_lut.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v1_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v2
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v2_lut.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v2_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v3
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v3_lut
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_pytorch import \
- get_pytorch_tpc as get_pytorch_tpc_v4
-
- # Pytorch: TPC versioning
- tpc_models_dict = {'v1': get_pytorch_tpc_v1,
- 'v1_lut': get_pytorch_tpc_v1_lut,
- 'v1_pot': get_pytorch_tpc_v1_pot,
- 'v2': get_pytorch_tpc_v2,
- 'v2_lut': get_pytorch_tpc_v2_lut,
- 'v3': get_pytorch_tpc_v3,
- 'v3_lut': get_pytorch_tpc_v3_lut,
- 'v4': get_pytorch_tpc_v4,
- LATEST: get_pytorch_tpc_latest}
- if tpc_models_dict is not None:
- return tpc_models_dict
- else:
- Logger.critical(f'Framework {fw_name} is not supported in imx500 or the relevant packages are not '
- f'installed. Please make sure the relevant packages are installed when using MCT for optimizing'
- f' a {fw_name} model. For Tensorflow, please install tensorflow. For PyTorch, please install '
- f'torch.') # pragma: no cover
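Note: the deleted file above exposed per-version TPC builders through a dict keyed by version string. For context, a sketch of how the removed helper was used against the previous nightly (imports taken from the deleted file and its path; the caller line itself is illustrative):

    # Old API (2.2.0.20250106.546): resolve a Keras TPC by version key from the per-framework dict.
    from model_compression_toolkit.constants import TENSORFLOW
    from model_compression_toolkit.target_platform_capabilities.constants import LATEST
    from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.target_platform_capabilities import \
        get_tpc_dict_by_fw

    keras_tpc = get_tpc_dict_by_fw(TENSORFLOW)[LATEST]()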
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py (deleted)
@@ -1,129 +0,0 @@
- # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
- import tensorflow as tf
- from packaging import version
-
- from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
- from model_compression_toolkit.defaultdict import DefaultDict
- from model_compression_toolkit.verify_packages import FOUND_SONY_CUSTOM_LAYERS
- from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_DEPTHWISE_KERNEL, \
- KERAS_KERNEL, BIAS_ATTR, BIAS
-
- if FOUND_SONY_CUSTOM_LAYERS:
- from sony_custom_layers.keras.object_detection.ssd_post_process import SSDPostProcess
-
- if version.parse(tf.__version__) >= version.parse("2.13"):
- from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
- MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
- Conv2DTranspose
- else:
- from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
- MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
- Conv2DTranspose
-
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model
- import model_compression_toolkit as mct
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1 import __version__ as TPC_VERSION
-
- tp = mct.target_platform
-
-
- def get_keras_tpc() -> tp.TargetPlatformCapabilities:
- """
- get a Keras TargetPlatformCapabilities object with default operation sets to layers mapping.
- Returns: a Keras TargetPlatformCapabilities object for the given TargetPlatformModel.
- """
- imx500_tpc_tp_model = get_tp_model()
- return generate_keras_tpc(name='imx500_tpc_keras_tpc', tp_model=imx500_tpc_tp_model)
-
-
- def generate_keras_tpc(name: str, tp_model: TargetPlatformModel):
- """
- Generates a TargetPlatformCapabilities object with default operation sets to layers mapping.
-
- Args:
- name: Name of the TargetPlatformCapabilities.
- tp_model: TargetPlatformModel object.
-
- Returns: a TargetPlatformCapabilities object for the given TargetPlatformModel.
- """
-
- keras_tpc = tp.TargetPlatformCapabilities(tp_model)
-
- no_quant_list = [Reshape,
- tf.reshape,
- Permute,
- tf.transpose,
- Flatten,
- Cropping2D,
- ZeroPadding2D,
- Dropout,
- MaxPooling2D,
- tf.split,
- tf.quantization.fake_quant_with_min_max_vars,
- tf.math.argmax,
- tf.shape,
- tf.math.equal,
- tf.gather,
- tf.cast,
- tf.unstack,
- tf.compat.v1.gather,
- tf.nn.top_k,
- tf.__operators__.getitem,
- tf.image.combined_non_max_suppression,
- tf.compat.v1.shape]
-
- if FOUND_SONY_CUSTOM_LAYERS:
- no_quant_list.append(SSDPostProcess)
-
- with keras_tpc:
- tp.OperationsSetToLayers("NoQuantization", no_quant_list)
- tp.OperationsSetToLayers("Conv",
- [Conv2D,
- DepthwiseConv2D,
- Conv2DTranspose,
- tf.nn.conv2d,
- tf.nn.depthwise_conv2d,
- tf.nn.conv2d_transpose],
- # we provide attributes mapping that maps each layer type in the operations set
- # that has weights attributes with provided quantization config (in the tp model) to
- # its framework-specific attribute name.
- # note that a DefaultDict should be provided if not all the layer types in the
- # operation set are provided separately in the mapping.
- attr_mapping={
- KERNEL_ATTR: DefaultDict({
- DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
- tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
- BIAS_ATTR: DefaultDict(default_value=BIAS)})
- tp.OperationsSetToLayers("FullyConnected", [Dense],
- attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
- BIAS_ATTR: DefaultDict(default_value=BIAS)})
- tp.OperationsSetToLayers("AnyReLU", [tf.nn.relu,
- tf.nn.relu6,
- tf.nn.leaky_relu,
- ReLU,
- LeakyReLU,
- tp.LayerFilterParams(Activation, activation="relu"),
- tp.LayerFilterParams(Activation, activation="leaky_relu")])
- tp.OperationsSetToLayers("Add", [tf.add, Add])
- tp.OperationsSetToLayers("Sub", [tf.subtract, Subtract])
- tp.OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
- tp.OperationsSetToLayers("Div", [tf.math.divide])
- tp.OperationsSetToLayers("PReLU", [PReLU])
- tp.OperationsSetToLayers("Swish", [tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")])
- tp.OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
- tp.OperationsSetToLayers("Tanh", [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
-
- return keras_tpc
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py (deleted)
@@ -1,108 +0,0 @@
- # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
-
- import operator
-
- import torch
- from torch import add, sub, mul, div, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, chunk, unbind, topk, \
- gather, equal, transpose, permute, argmax, squeeze
- from torch.nn import Conv2d, Linear, BatchNorm2d, ConvTranspose2d
- from torch.nn import Dropout, Flatten, Hardtanh
- from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, LeakyReLU
- from torch.nn.functional import relu, relu6, prelu, silu, hardtanh, hardswish, leaky_relu
-
- from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
- from model_compression_toolkit.defaultdict import DefaultDict
- from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, PYTORCH_KERNEL, \
- BIAS
- from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model
- import model_compression_toolkit as mct
-
- tp = mct.target_platform
-
-
- def get_pytorch_tpc() -> tp.TargetPlatformCapabilities:
- """
- get a Pytorch TargetPlatformCapabilities object with default operation sets to layers mapping.
- Returns: a Pytorch TargetPlatformCapabilities object for the given TargetPlatformModel.
- """
- imx500_tpc_tp_model = get_tp_model()
- return generate_pytorch_tpc(name='imx500_tpc_pytorch_tpc', tp_model=imx500_tpc_tp_model)
-
-
- def generate_pytorch_tpc(name: str, tp_model: TargetPlatformModel):
- """
- Generates a TargetPlatformCapabilities object with default operation sets to layers mapping.
- Args:
- name: Name of the TargetPlatformModel.
- tp_model: TargetPlatformModel object.
- Returns: a TargetPlatformCapabilities object for the given TargetPlatformModel.
- """
-
- pytorch_tpc = tp.TargetPlatformCapabilities(tp_model)
-
- # we provide attributes mapping that maps each layer type in the operations set
- # that has weights attributes with provided quantization config (in the tp model) to
- # its framework-specific attribute name.
- # note that a DefaultDict should be provided if not all the layer types in the
- # operation set are provided separately in the mapping.
- pytorch_linear_attr_mapping = {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
- BIAS_ATTR: DefaultDict(default_value=BIAS)}
-
- with pytorch_tpc:
- tp.OperationsSetToLayers("NoQuantization", [Dropout,
- Flatten,
- dropout,
- flatten,
- split,
- operator.getitem,
- reshape,
- unsqueeze,
- BatchNorm2d,
- chunk,
- unbind,
- torch.Tensor.size,
- permute,
- transpose,
- equal,
- argmax,
- gather,
- topk,
- squeeze])
-
- tp.OperationsSetToLayers("Conv", [Conv2d, ConvTranspose2d],
- attr_mapping=pytorch_linear_attr_mapping)
- tp.OperationsSetToLayers("FullyConnected", [Linear],
- attr_mapping=pytorch_linear_attr_mapping)
- tp.OperationsSetToLayers("AnyReLU", [torch.relu,
- ReLU,
- ReLU6,
- LeakyReLU,
- relu,
- relu6,
- leaky_relu,
- tp.LayerFilterParams(Hardtanh, min_val=0),
- tp.LayerFilterParams(hardtanh, min_val=0)])
-
- tp.OperationsSetToLayers("Add", [operator.add, add])
- tp.OperationsSetToLayers("Sub", [operator.sub, sub])
- tp.OperationsSetToLayers("Mul", [operator.mul, mul])
- tp.OperationsSetToLayers("Div", [operator.truediv, div])
- tp.OperationsSetToLayers("PReLU", [PReLU, prelu])
- tp.OperationsSetToLayers("Swish", [SiLU, silu, Hardswish, hardswish])
- tp.OperationsSetToLayers("Sigmoid", [Sigmoid, sigmoid])
- tp.OperationsSetToLayers("Tanh", [Tanh, tanh])
-
- return pytorch_tpc
model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py (deleted)
@@ -1,16 +0,0 @@
- # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
-
- __version__ = 'v1_lut'