mct-nightly 2.4.0.20250618.606__py3-none-any.whl → 2.4.0.20250619.621__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mct-nightly
- Version: 2.4.0.20250618.606
+ Version: 2.4.0.20250619.621
  Summary: A Model Compression Toolkit for neural networks
  Author-email: ssi-dnn-dev@sony.com
  Classifier: Programming Language :: Python :: 3
@@ -1,5 +1,5 @@
- mct_nightly-2.4.0.20250618.606.dist-info/licenses/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
- model_compression_toolkit/__init__.py,sha256=wrRgW-DB8mcY8PhRhrQ3I5qhLKYqFra-UMKgJRufNBs,1557
+ mct_nightly-2.4.0.20250619.621.dist-info/licenses/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ model_compression_toolkit/__init__.py,sha256=IwA7G4npLqjEevZwVWOsQQsli7NOIlB3fVVBKUgnNZU,1557
  model_compression_toolkit/constants.py,sha256=KNgiNLpsMgSYyXMNEbHXd4bFNerQc1D6HH3vpbUq_Gs,4086
  model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
  model_compression_toolkit/logger.py,sha256=L3q7tn3Uht0i_7phnlOWMR2Te2zvzrt2HOz9vYEInts,4529
@@ -162,7 +162,7 @@ model_compression_toolkit/core/keras/default_framework_info.py,sha256=n0fkMlQ0Cg
  model_compression_toolkit/core/keras/keras_implementation.py,sha256=x5EOYBrg2chC9-OUlrd0laLpnnHCFhYYAFNKRhVh6aQ,28526
  model_compression_toolkit/core/keras/keras_model_validation.py,sha256=dMS9cqaYmliyzVu2-MrKx4AIubqz3HW3RY4if2TV6U8,1581
  model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=k9cwu3S-OUGFaOHxH6cyYS2JjxAYHfBddz0laf6Quds,3311
- model_compression_toolkit/core/keras/resource_utilization_data_facade.py,sha256=i1VsqqPsVb98YCepCjypIjnBXY-m_r7pUnPpTSETNWc,5532
+ model_compression_toolkit/core/keras/resource_utilization_data_facade.py,sha256=xxZlHyruhLuP2iEgMrZhq_AyAGORTqzweVLARFfpaRw,5643
  model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=jzD8FGEEa8ZD7w8IpTRdp-Udf1MwOTgjg2XTS1Givic,2696
  model_compression_toolkit/core/keras/back2framework/__init__.py,sha256=rhIiXg_nBgUZ-baE3M6SzCuQbcnq4iebY1jtJBvKHOM,808
  model_compression_toolkit/core/keras/back2framework/factory_model_builder.py,sha256=UIQgOOdexycrSKombTMJVvTthR7MlrCihoqM8Kg-rnE,2293
@@ -331,14 +331,14 @@ model_compression_toolkit/exporter/model_exporter/keras/export_serialization_for
  model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py,sha256=n_iXPwMomMVJTZH9M1WV7OJo11ppXOWkANu41fIlsjY,11702
  model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=XoFGkVBikKh1BuxObrMLjfVLDIgy3X0rhmEl08CdJls,3727
  model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py,sha256=iTUXaia8XLJmmWdk4iiCah9sxeIyBJy42s9_EpuPhnw,8261
- model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py,sha256=NzcX7rxLk__Kpuim_VXaOHS4tyiRtRBoERPE00GbdfA,5862
+ model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py,sha256=2LQ7afCtciq8pDcCfQvwXz-uMlABiZJdRsztIWs6040,5973
  model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py,sha256=qXXkv3X_wb7t622EOHwXIxfGLGaDqh0T0y4UxREi4Bo,1976
  model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
  model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py,sha256=9adOGG1nyviNzuL-1aJXyL0c_VQllSZWiG2gR-puywo,6420
  model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py,sha256=bPevy6OBqng41PqytBR55e6cBEuyrUS0H8dWX4zgjQ4,967
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=1ix8j7rxc1giPjf2PZKwaaCb5pKo0obUvPmRtklmugY,10056
+ model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=H0vXDcnQhVXAy-WOpnoM8kjTs5gzmYmO2IiDECSUpd0,10239
  model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=ksWV2A-Njo-wAxQ_Ye2sLIZXBWJ_WNyjT7-qFFwvV2o,2897
- model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=7xuUrHPMiifn23sWfeiqR9wkYhm8EweDRL_vF-JSxMY,6642
+ model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=pDBAsUSm3Dq5ZKFH1XftvZ5GZn_R63IJpuhUII9Z_k0,6759
  model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
  model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py,sha256=pKAdbTCFM_2BrZXUtTIw0ouKotrWwUDF_hP3rPwCM2k,696
  model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py,sha256=Bd3QhAR__YC9Xmobd5qHv9ofh_rPn_eTFV0sXizcBnY,2297
@@ -529,7 +529,7 @@ model_compression_toolkit/xquant/pytorch/model_analyzer.py,sha256=b93o800yVB3Z-i
  model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py,sha256=Y0oBl8qPFsdNrK49XczwmVacInJcOPHslVnFBs-iTCc,3742
  model_compression_toolkit/xquant/pytorch/similarity_functions.py,sha256=CERxq5K8rqaiE-DlwhZBTUd9x69dtYJlkHOPLB54vm8,2354
  model_compression_toolkit/xquant/pytorch/tensorboard_utils.py,sha256=n0HvWBzkBkUJZlS3WeynhpsRTps2qQkjlq7luliBHNU,9627
- mct_nightly-2.4.0.20250618.606.dist-info/METADATA,sha256=4-RG_tJDUZfTOBhT-rkaWLwAhWIoZNB8szWU_njK5iQ,25087
- mct_nightly-2.4.0.20250618.606.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- mct_nightly-2.4.0.20250618.606.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
- mct_nightly-2.4.0.20250618.606.dist-info/RECORD,,
+ mct_nightly-2.4.0.20250619.621.dist-info/METADATA,sha256=Kc3thJIr5lW5AUz47ho_FKA41lX6vXg4CUijKaE0elg,25087
+ mct_nightly-2.4.0.20250619.621.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ mct_nightly-2.4.0.20250619.621.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-2.4.0.20250619.621.dist-info/RECORD,,
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.4.0.20250618.000606"
+ __version__ = "2.4.0.20250619.000621"
@@ -28,12 +28,14 @@ if FOUND_TF:
  AttachTpcToKeras
  from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
  from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
+ from model_compression_toolkit.core.keras.default_framework_info import set_keras_info
  from tensorflow.keras.models import Model

  from model_compression_toolkit import get_target_platform_capabilities

  KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)

+ @set_keras_info
  def keras_resource_utilization_data(in_model: Model,
  representative_data_gen: Callable,
  core_config: CoreConfig = CoreConfig(
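Note on the facade hunks in this release: each public entry point (keras_resource_utilization_data, keras_export_model, pytorch_export_model) gains a framework-info decorator (set_keras_info / set_pytorch_info) imported from the corresponding default_framework_info module, so framework defaults are set up before the wrapped API runs. The decorator implementations are not part of this diff; the sketch below is only an illustration of the general pattern, with a hypothetical set_fw_info factory and _FW_INFO registry standing in for whatever MCT does internally.

# Illustrative sketch only -- not MCT's actual set_keras_info / set_pytorch_info code.
from functools import wraps
from typing import Any, Callable, Dict

_FW_INFO: Dict[str, Any] = {}  # hypothetical registry of framework defaults

def set_fw_info(fw_name: str) -> Callable:
    """Return a decorator that records framework defaults before calling the wrapped function."""
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            _FW_INFO["active_framework"] = fw_name  # e.g. "keras" or "pytorch"
            return func(*args, **kwargs)
        return wrapper
    return decorator

@set_fw_info("keras")
def resource_utilization_stub(model_name: str) -> str:
    # Stand-in for the real facade; only demonstrates the decorator ordering.
    return f"resource utilization for {model_name} under {_FW_INFO['active_framework']}"

print(resource_utilization_stub("mobilenet"))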
@@ -21,6 +21,7 @@ from model_compression_toolkit.logger import Logger

  if FOUND_TF:
  import keras
+ from model_compression_toolkit.core.keras.default_framework_info import set_keras_info
  from model_compression_toolkit.exporter.model_wrapper.keras.validate_layer import is_keras_layer_exportable
  from model_compression_toolkit.exporter.model_exporter.keras.fakely_quant_keras_exporter import \
  FakelyQuantKerasExporter
@@ -36,6 +37,7 @@ if FOUND_TF:
  KerasExportSerializationFormat.TFLITE: [QuantizationFormat.FAKELY_QUANT, QuantizationFormat.INT8]
  }

+ @set_keras_info
  def keras_export_model(model: keras.models.Model,
  save_model_path: str,
  is_layer_exportable_fn: Callable = is_keras_layer_exportable,
@@ -73,23 +73,25 @@ if FOUND_ONNX:
  Returns:
  Fake-quant PyTorch model.
  """
- # List all activation quantization holders with num_bits>8 and replace them with Identity, because
- # ONNX doesn't support quantization of more than 8 bits for torch.fake_quantize_per_tensor_affine.
- act_holder_list = [n for n, m in self.model.named_modules()
- if isinstance(m, PytorchActivationQuantizationHolder) and
- m.activation_holder_quantizer.num_bits > 8]
- for act_holder in act_holder_list: # pragma: no cover
- obj = self.model
- attrs = act_holder.split(".")
- for a in attrs[:-1]:
- obj = getattr(obj, a)
- if hasattr(obj, attrs[-1]):
- delattr(obj, attrs[-1])
- setattr(obj, attrs[-1], torch.nn.Identity())
- else:
- Logger.info(f"During removal of activation quantization of a quantizer (with bits > 8) in ONNX FQ "
- f"export, deletion of activation holder '{act_holder}' failed — could not locate one or"
- f"more intermediate attributes in the path.")
+ # When exporting using Fakely Quant Quantization Format list all activation quantization holders with
+ # num_bits>8 and replace them with Identity, because ONNX doesn't support quantization of more than 8 bits
+ # for torch.fake_quantize_per_tensor_affine.
+ if not self._use_onnx_custom_quantizer_ops:
+ act_holder_list = [n for n, m in self.model.named_modules()
+ if isinstance(m, PytorchActivationQuantizationHolder) and
+ m.activation_holder_quantizer.num_bits > 8]
+ for act_holder in act_holder_list: # pragma: no cover
+ obj = self.model
+ attrs = act_holder.split(".")
+ for a in attrs[:-1]:
+ obj = getattr(obj, a)
+ if hasattr(obj, attrs[-1]):
+ delattr(obj, attrs[-1])
+ setattr(obj, attrs[-1], torch.nn.Identity())
+ else:
+ Logger.info(f"During removal of activation quantization of a quantizer (with bits > 8) in ONNX"
+ f"FQ export, deletion of activation holder '{act_holder}' failed — could not locate"
+ f"one or more intermediate attributes in the path.")

  for layer in self.model.children():
  self.is_layer_exportable_fn(layer)
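For readability, here is the logic this hunk now guards behind the _use_onnx_custom_quantizer_ops flag, as a standalone indented sketch: it finds every activation quantization holder whose quantizer uses more than 8 bits, walks its dotted module path, and swaps it for torch.nn.Identity() before fake-quant ONNX export. PytorchActivationQuantizationHolder and the flag are MCT internals, so the demo substitutes a hypothetical holder class and a plain boolean; it runs with only torch installed.

# Standalone sketch of the guarded block above, using only torch.
# HypotheticalActQuantHolder stands in for MCT's PytorchActivationQuantizationHolder.
import torch.nn as nn

class HypotheticalActQuantHolder(nn.Module):
    """Placeholder for an activation quantization holder exposing a num_bits attribute."""
    def __init__(self, num_bits: int):
        super().__init__()
        self.num_bits = num_bits

    def forward(self, x):
        return x

def strip_wide_activation_holders(model: nn.Module, use_custom_quantizer_ops: bool = False) -> nn.Module:
    """Replace >8-bit activation holders with Identity, mirroring the fakely-quant ONNX export path."""
    if use_custom_quantizer_ops:
        return model  # with custom quantizer ops the holders are kept as-is
    holder_names = [n for n, m in model.named_modules()
                    if isinstance(m, HypotheticalActQuantHolder) and m.num_bits > 8]
    for name in holder_names:
        obj = model
        attrs = name.split(".")
        for a in attrs[:-1]:  # walk the dotted path down to the parent module
            obj = getattr(obj, a)
        if hasattr(obj, attrs[-1]):
            # setattr on an nn.Module replaces the registered submodule in place
            setattr(obj, attrs[-1], nn.Identity())
        else:
            print(f"could not locate '{name}' while removing a >8-bit activation holder")
    return model

model = nn.Sequential(nn.Linear(4, 4), HypotheticalActQuantHolder(num_bits=16))
strip_wide_activation_holders(model)
print(model)  # the 16-bit holder is now an Identity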
@@ -27,6 +27,7 @@ DEFAULT_ONNX_OPSET_VERSION = 15

  if FOUND_TORCH:
  import torch.nn
+ from model_compression_toolkit.core.pytorch.default_framework_info import set_pytorch_info
  from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_onnx_pytorch_exporter import FakelyQuantONNXPyTorchExporter
  from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_torchscript_pytorch_exporter import FakelyQuantTorchScriptPyTorchExporter
  from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
@@ -41,6 +42,7 @@ if FOUND_TORCH:
  PytorchExportSerializationFormat.ONNX: [QuantizationFormat.FAKELY_QUANT, QuantizationFormat.MCTQ]
  }

+ @set_pytorch_info
  def pytorch_export_model(model: torch.nn.Module,
  save_model_path: str,
  repr_dataset: Callable,