mct-nightly 2.2.0.20241211.531__py3-none-any.whl → 2.2.0.20241213.540__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {mct_nightly-2.2.0.20241211.531.dist-info → mct_nightly-2.2.0.20241213.540.dist-info}/METADATA +1 -1
  2. {mct_nightly-2.2.0.20241211.531.dist-info → mct_nightly-2.2.0.20241213.540.dist-info}/RECORD +21 -21
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/core/common/graph/base_node.py +3 -2
  5. model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +3 -2
  6. model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py +83 -14
  7. model_compression_toolkit/target_platform_capabilities/schema/v1.py +407 -475
  8. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +5 -3
  9. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +5 -3
  10. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +5 -6
  11. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +3 -3
  12. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +3 -3
  13. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +5 -6
  14. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +3 -3
  15. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +5 -6
  16. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +3 -3
  17. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +9 -9
  18. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +2 -2
  19. {mct_nightly-2.2.0.20241211.531.dist-info → mct_nightly-2.2.0.20241213.540.dist-info}/LICENSE.md +0 -0
  20. {mct_nightly-2.2.0.20241211.531.dist-info → mct_nightly-2.2.0.20241213.540.dist-info}/WHEEL +0 -0
  21. {mct_nightly-2.2.0.20241211.531.dist-info → mct_nightly-2.2.0.20241213.540.dist-info}/top_level.txt +0 -0
@@ -16,6 +16,8 @@
  from typing import List, Any, Dict

  from model_compression_toolkit.logger import Logger
+ from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
+     get_config_options_by_operators_set, is_opset_in_model
  from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
  from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.target_platform_capabilities_component import TargetPlatformCapabilitiesComponent
  from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetConcat
@@ -137,14 +139,14 @@ class OperationsToLayers:
  f'is of type {type(ops2layers)}'

  # Assert that opset in the current TargetPlatformCapabilities and has a unique name.
- is_opset_in_model = _current_tpc.get().tp_model.is_opset_in_model(ops2layers.name)
- assert is_opset_in_model, f'{ops2layers.name} is not defined in the target platform model that is associated with the target platform capabilities.'
+ opset_in_model = is_opset_in_model(_current_tpc.get().tp_model, ops2layers.name)
+ assert opset_in_model, f'{ops2layers.name} is not defined in the target platform model that is associated with the target platform capabilities.'
  assert not (ops2layers.name in existing_opset_names), f'OperationsSetToLayers names should be unique, but {ops2layers.name} appears to violate it.'
  existing_opset_names.append(ops2layers.name)

  # Assert that a layer does not appear in more than a single OperatorsSet in the TargetPlatformModel.
  for layer in ops2layers.layers:
- qco_by_opset_name = _current_tpc.get().tp_model.get_config_options_by_operators_set(ops2layers.name)
+ qco_by_opset_name = get_config_options_by_operators_set(_current_tpc.get().tp_model, ops2layers.name)
  if layer in existing_layers:
  Logger.critical(f'Found layer {layer.__name__} in more than one '
  f'OperatorsSet') # pragma: no cover
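For context, a minimal sketch of the call pattern introduced by this hunk: the opset lookups move from TargetPlatformModel methods to module-level helpers in schema_functions that take the model as their first argument. The tp_model object and the "Conv" opset name below are illustrative, not taken from this diff:

    from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import (
        get_config_options_by_operators_set, is_opset_in_model)

    # Old style: tp_model.is_opset_in_model("Conv")
    # New style: pass the TargetPlatformModel explicitly to the helper.
    if is_opset_in_model(tp_model, "Conv"):
        # Same pattern for the quantization-config lookup.
        qco = get_config_options_by_operators_set(tp_model, "Conv")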
@@ -19,6 +19,8 @@ import pprint
  from typing import List, Any, Dict, Tuple

  from model_compression_toolkit.logger import Logger
+ from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
+     get_config_options_by_operators_set, get_default_op_quantization_config, get_opset_by_name
  from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
      OperationsToLayers, OperationsSetToLayers
  from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.target_platform_capabilities_component import TargetPlatformCapabilitiesComponent
@@ -64,7 +66,7 @@ class TargetPlatformCapabilities(ImmutableClass):
  Returns:
  List of layers/LayerFilterParams that are attached to the opset name.
  """
- opset = self.tp_model.get_opset_by_name(opset_name)
+ opset = get_opset_by_name(self.tp_model, opset_name)
  if opset is None:
  Logger.warning(f'{opset_name} was not found in TargetPlatformCapabilities.')
  return None
@@ -165,7 +167,7 @@ class TargetPlatformCapabilities(ImmutableClass):
  to the TargetPlatformCapabilities.

  """
- return self.tp_model.get_default_op_quantization_config()
+ return get_default_op_quantization_config(self.tp_model)


  def _get_config_options_mapping(self) -> Tuple[Dict[Any, QuantizationConfigOptions],
@@ -181,7 +183,7 @@ class TargetPlatformCapabilities(ImmutableClass):
  filterlayer2qco = {}
  for op2layers in self.op_sets_to_layers.op_sets_to_layers:
  for l in op2layers.layers:
- qco = self.tp_model.get_config_options_by_operators_set(op2layers.name)
+ qco = get_config_options_by_operators_set(self.tp_model, op2layers.name)
  if qco is None:
  qco = self.tp_model.default_qco

@@ -164,7 +164,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  tpc_patch_version=0,
  tpc_platform_type=IMX500_TP_MODEL,
  name=name,
- add_metadata=False)
+ add_metadata=False,
+ is_simd_padding=True)

  # To start defining the model's components (such as operator sets, and fusing patterns),
  # use 'with' the TargetPlatformModel instance, and create them as below:
@@ -175,8 +176,6 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # be used for operations that will be attached to this set's label.
  # Otherwise, it will be a configure-less set (used in fusing):

- generated_tpc.set_simd_padding(is_simd_padding=True)
-
  # May suit for operations like: Dropout, Reshape, etc.
  default_qco = tp.get_default_quantization_config_options()
  schema.OperatorsSet("NoQuantization",
@@ -206,9 +205,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
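Similarly, a minimal sketch of the OperatorSetConcat signature change shown in this hunk and repeated across the tp_model files below: the grouped operator sets are now passed as a single list instead of separate positional arguments. The operator-set variables are the ones defined earlier in the same tp_model; only the call form changes:

    # Before: varargs form.
    #   any_binary = schema.OperatorSetConcat(add, sub, mul, div)

    # After: list form.
    any_binary = schema.OperatorSetConcat([add, sub, mul, div])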
@@ -201,9 +201,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -197,9 +197,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -166,7 +166,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  tpc_patch_version=0,
  tpc_platform_type=IMX500_TP_MODEL,
  add_metadata=True,
- name=name)
+ name=name,
+ is_simd_padding=True)

  # To start defining the model's components (such as operator sets, and fusing patterns),
  # use 'with' the TargetPlatformModel instance, and create them as below:
@@ -177,8 +178,6 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # be used for operations that will be attached to this set's label.
  # Otherwise, it will be a configure-less set (used in fusing):

- generated_tpm.set_simd_padding(is_simd_padding=True)
-
  # May suit for operations like: Dropout, Reshape, etc.
  default_qco = tp.get_default_quantization_config_options()
  schema.OperatorsSet("NoQuantization",
@@ -208,9 +207,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -203,9 +203,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -187,7 +187,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  tpc_patch_version=0,
  tpc_platform_type=IMX500_TP_MODEL,
  add_metadata=True,
- name=name)
+ name=name,
+ is_simd_padding=True)

  # To start defining the model's components (such as operator sets, and fusing patterns),
  # use 'with' the TargetPlatformModel instance, and create them as below:
@@ -198,8 +199,6 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # be used for operations that will be attached to this set's label.
  # Otherwise, it will be a configure-less set (used in fusing):

- generated_tpm.set_simd_padding(is_simd_padding=True)
-
  # May suit for operations like: Dropout, Reshape, etc.
  default_qco = tp.get_default_quantization_config_options()
  schema.OperatorsSet("NoQuantization",
@@ -231,9 +230,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -214,9 +214,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid, tanh])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -15,7 +15,7 @@
  from typing import List, Tuple

  import model_compression_toolkit as mct
- import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+ import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema
  from model_compression_toolkit.constants import FLOAT_BITWIDTH
  from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
      IMX500_TP_MODEL
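A minimal sketch of the import change above: this tp_model now appears to import the concrete schema.v1 module directly rather than the mct_current_schema alias, while downstream schema.* usages stay the same. The OperatorsSet call below is illustrative, not taken from this diff:

    # Before:
    #   import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema

    # After:
    import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema

    conv = schema.OperatorsSet("Conv")  # usage is identical under either import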
@@ -235,7 +235,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  tpc_minor_version=4,
  tpc_patch_version=0,
  tpc_platform_type=IMX500_TP_MODEL,
- add_metadata=True, name=name)
+ add_metadata=True,
+ name=name,
+ is_simd_padding=True)

  # To start defining the model's components (such as operator sets, and fusing patterns),
  # use 'with' the TargetPlatformModel instance, and create them as below:
@@ -246,8 +248,6 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # be used for operations that will be attached to this set's label.
  # Otherwise, it will be a configure-less set (used in fusing):

- generated_tpm.set_simd_padding(is_simd_padding=True)
-
  # May suit for operations like: Dropout, Reshape, etc.
  default_qco = tp.get_default_quantization_config_options()
  schema.OperatorsSet(OPSET_NO_QUANTIZATION,
@@ -294,11 +294,11 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  # Combine multiple operators into a single operator to avoid quantization between
  # them. To do this we define fusing patterns using the OperatorsSets that were created.
  # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
- activations_after_conv_to_fuse = schema.OperatorSetConcat(any_relu, swish, prelu, sigmoid,
-                                                           tanh, gelu, hardswish, hardsigmoid)
- activations_after_fc_to_fuse = schema.OperatorSetConcat(any_relu, swish, sigmoid, tanh, gelu,
-                                                         hardswish, hardsigmoid)
- any_binary = schema.OperatorSetConcat(add, sub, mul, div)
+ activations_after_conv_to_fuse = schema.OperatorSetConcat([any_relu, swish, prelu, sigmoid,
+                                                            tanh, gelu, hardswish, hardsigmoid])
+ activations_after_fc_to_fuse = schema.OperatorSetConcat([any_relu, swish, sigmoid, tanh, gelu,
+                                                          hardswish, hardsigmoid])
+ any_binary = schema.OperatorSetConcat([add, sub, mul, div])

  # ------------------- #
  # Fusions
@@ -180,11 +180,11 @@ def generate_tp_model(default_config: OpQuantizationConfig,
  fixed_zero_point=-128, fixed_scale=1 / 256))

  conv2d = schema.OperatorsSet("Conv2d")
- kernel = schema.OperatorSetConcat(conv2d, fc)
+ kernel = schema.OperatorSetConcat([conv2d, fc])

  relu = schema.OperatorsSet("Relu")
  elu = schema.OperatorsSet("Elu")
- activations_to_fuse = schema.OperatorSetConcat(relu, elu)
+ activations_to_fuse = schema.OperatorSetConcat([relu, elu])

  batch_norm = schema.OperatorsSet("BatchNorm")
  bias_add = schema.OperatorsSet("BiasAdd")