mct-nightly 2.0.0.20240508.121838__py3-none-any.whl → 2.0.0.20240508.122218__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.0.0.20240508.121838
+ Version: 2.0.0.20240508.122218
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,4 +1,4 @@
- model_compression_toolkit/__init__.py,sha256=TDig5WOAQRhJrpoVfD05x8ZxU7iOSUl3OtaVQ1acZMM,1573
+ model_compression_toolkit/__init__.py,sha256=UwOX-ld2fBD1wK2ssmJUlOZGDaW6NDVqN1PQO_5RR6A,1573
  model_compression_toolkit/constants.py,sha256=yIJyJ-e1WrDeKD9kG15qkqfYnoj7J1J2CxnJDt008ik,3756
  model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
  model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567
@@ -315,9 +315,9 @@ model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py,s
  model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
  model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py,sha256=UPVkEUQCMZ4Lld6CRnEOPEmlfe5vcQZG0Q3FwRBodD4,4021
  model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py,sha256=bPevy6OBqng41PqytBR55e6cBEuyrUS0H8dWX4zgjQ4,967
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=r2pOWFK-mSG8OzRiKGVOG4skzX0ZiM0eiRuBsL-ThoI,6067
+ model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=b-qC60LiRtc52gIXdUbrdTBKUgCIaResDLXFE8zt_F4,6732
  model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=ksWV2A-Njo-wAxQ_Ye2sLIZXBWJ_WNyjT7-qFFwvV2o,2897
- model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=yz5dPMX5r1d9LJV4rYFS1pXqCbVUxvUmV4LELWcRinQ,6350
+ model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=5G9dikFY4A66XVjpaOWVWX81Qr6ZdwnoyBzFDL_abi8,6242
  model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
  model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py,sha256=pKAdbTCFM_2BrZXUtTIw0ouKotrWwUDF_hP3rPwCM2k,696
  model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py,sha256=Bd3QhAR__YC9Xmobd5qHv9ofh_rPn_eTFV0sXizcBnY,2297
@@ -483,8 +483,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
  model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
  model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=MxylaVFPgN7zBiRBy6WV610EA4scLgRJFbMucKvvNDU,2896
- mct_nightly-2.0.0.20240508.121838.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
- mct_nightly-2.0.0.20240508.121838.dist-info/METADATA,sha256=pYFfSFSP2HsNDAum_dYBTotnT_hTBo3sPLJUZXAkrkE,18798
- mct_nightly-2.0.0.20240508.121838.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- mct_nightly-2.0.0.20240508.121838.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
- mct_nightly-2.0.0.20240508.121838.dist-info/RECORD,,
+ mct_nightly-2.0.0.20240508.122218.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ mct_nightly-2.0.0.20240508.122218.dist-info/METADATA,sha256=la_dJPFTwsLA7eYayNIaXNrtV6umEQjM8O7uyHz45Aw,18798
+ mct_nightly-2.0.0.20240508.122218.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ mct_nightly-2.0.0.20240508.122218.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-2.0.0.20240508.122218.dist-info/RECORD,,
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.0.0.20240508.121838"
+ __version__ = "2.0.0.20240508.122218"
@@ -16,117 +16,124 @@ from typing import Callable
  from io import BytesIO

  import torch.nn
- import onnx

  from mct_quantizers import PytorchActivationQuantizationHolder, PytorchQuantizationWrapper
+ from model_compression_toolkit.constants import FOUND_ONNX
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
  from model_compression_toolkit.exporter.model_exporter.pytorch.base_pytorch_exporter import BasePyTorchExporter
  from mct_quantizers import pytorch_quantizers
- from mct_quantizers.pytorch.metadata import add_onnx_metadata
-
- DEFAULT_ONNX_OPSET_VERSION=15
-
-
- class FakelyQuantONNXPyTorchExporter(BasePyTorchExporter):
-     """
-     Exporter for fakely-quant PyTorch models.
-     The exporter expects to receive an exportable model (where each layer's full quantization parameters
-     can be retrieved), and convert it into a fakely-quant model (namely, weights that are in fake-quant
-     format) and fake-quant layers for the activations.
-     """
-
-     def __init__(self,
-                  model: torch.nn.Module,
-                  is_layer_exportable_fn: Callable,
-                  save_model_path: str,
-                  repr_dataset: Callable,
-                  use_onnx_custom_quantizer_ops: bool = False,
-                  onnx_opset_version=DEFAULT_ONNX_OPSET_VERSION):
-         """
-
-         Args:
-             model: Model to export.
-             is_layer_exportable_fn: Callable to check whether a layer can be exported or not.
-             save_model_path: Path to save the exported model.
-             repr_dataset: Representative dataset (needed for creating torch script).
-             use_onnx_custom_quantizer_ops: Whether to export quantizers custom ops in ONNX or not.
-             onnx_opset_version: ONNX opset version to use for exported ONNX model.
-         """

-         super().__init__(model,
-                          is_layer_exportable_fn,
-                          save_model_path,
-                          repr_dataset)

-         self._use_onnx_custom_quantizer_ops = use_onnx_custom_quantizer_ops
-         self._onnx_opset_version = onnx_opset_version
+ if FOUND_ONNX:
+     import onnx
+     from mct_quantizers.pytorch.metadata import add_onnx_metadata

-     def export(self) -> None:
+     class FakelyQuantONNXPyTorchExporter(BasePyTorchExporter):
          """
-         Convert an exportable (fully-quantized) PyTorch model to a fakely-quant model
-         (namely, weights that are in fake-quant format) and fake-quant layers for the activations.
-
-         Returns:
-             Fake-quant PyTorch model.
+         Exporter for fakely-quant PyTorch models.
+         The exporter expects to receive an exportable model (where each layer's full quantization parameters
+         can be retrieved), and convert it into a fakely-quant model (namely, weights that are in fake-quant
+         format) and fake-quant layers for the activations.
          """
-         for layer in self.model.children():
-             self.is_layer_exportable_fn(layer)
-
-         # Set forward that is used during onnx export.
-         # If _use_onnx_custom_quantizer_ops is set to True, the quantizer forward function will use
-         # the custom implementation when exporting the operator into onnx model. If not, it removes the
-         # wraps and quantizes the ops in place (for weights, for activation torch quantization function is
-         # exported since it's used during forward).
-         if self._use_onnx_custom_quantizer_ops:
-             self._enable_onnx_custom_ops_export()
-         else:
-             self._substitute_fully_quantized_model()
-
-         if self._use_onnx_custom_quantizer_ops:
-             Logger.info(f"Exporting onnx model with MCTQ quantizers: {self.save_model_path}")
-         else:
-             Logger.info(f"Exporting fake-quant onnx model: {self.save_model_path}")
-
-         model_input = to_torch_tensor(next(self.repr_dataset())[0])
-
-         if hasattr(self.model, 'metadata'):
-             onnx_bytes = BytesIO()
-             torch.onnx.export(self.model,
-                               model_input,
-                               onnx_bytes,
-                               opset_version=self._onnx_opset_version,
-                               verbose=False,
-                               input_names=['input'],
-                               output_names=['output'],
-                               dynamic_axes={'input': {0: 'batch_size'},
-                                             'output': {0: 'batch_size'}})
-             onnx_model = onnx.load_from_string(onnx_bytes.getvalue())
-             onnx_model = add_onnx_metadata(onnx_model, self.model.metadata)
-             onnx.save_model(onnx_model, self.save_model_path)
-         else:
-             torch.onnx.export(self.model,
-                               model_input,
-                               self.save_model_path,
-                               opset_version=self._onnx_opset_version,
-                               verbose=False,
-                               input_names=['input'],
-                               output_names=['output'],
-                               dynamic_axes={'input': {0: 'batch_size'},
-                                             'output': {0: 'batch_size'}})
-
-     def _enable_onnx_custom_ops_export(self):
-         """
-         Enable the custom implementation forward in quantizers, so it is exported
-         with custom quantizers.
-         """
-
-         for n, m in self.model.named_modules():
-             if isinstance(m, PytorchActivationQuantizationHolder):
-                 assert isinstance(m.activation_holder_quantizer, pytorch_quantizers.BasePyTorchInferableQuantizer)
-                 m.activation_holder_quantizer.enable_custom_impl()

-             if isinstance(m, PytorchQuantizationWrapper):
-                 for wq in m.weights_quantizers.values():
-                     assert isinstance(wq, pytorch_quantizers.BasePyTorchInferableQuantizer)
-                     wq.enable_custom_impl()
+         def __init__(self,
+                      model: torch.nn.Module,
+                      is_layer_exportable_fn: Callable,
+                      save_model_path: str,
+                      repr_dataset: Callable,
+                      onnx_opset_version: int,
+                      use_onnx_custom_quantizer_ops: bool = False,):
+             """
+
+             Args:
+                 model: Model to export.
+                 is_layer_exportable_fn: Callable to check whether a layer can be exported or not.
+                 save_model_path: Path to save the exported model.
+                 repr_dataset: Representative dataset (needed for creating torch script).
+                 onnx_opset_version: ONNX opset version to use for exported ONNX model.
+                 use_onnx_custom_quantizer_ops: Whether to export quantizers custom ops in ONNX or not.
+             """
+
+             super().__init__(model,
+                              is_layer_exportable_fn,
+                              save_model_path,
+                              repr_dataset)
+
+             self._use_onnx_custom_quantizer_ops = use_onnx_custom_quantizer_ops
+             self._onnx_opset_version = onnx_opset_version
+
+         def export(self) -> None:
+             """
+             Convert an exportable (fully-quantized) PyTorch model to a fakely-quant model
+             (namely, weights that are in fake-quant format) and fake-quant layers for the activations.
+
+             Returns:
+                 Fake-quant PyTorch model.
+             """
+             for layer in self.model.children():
+                 self.is_layer_exportable_fn(layer)
+
+             # Set forward that is used during onnx export.
+             # If _use_onnx_custom_quantizer_ops is set to True, the quantizer forward function will use
+             # the custom implementation when exporting the operator into onnx model. If not, it removes the
+             # wraps and quantizes the ops in place (for weights, for activation torch quantization function is
+             # exported since it's used during forward).
+             if self._use_onnx_custom_quantizer_ops:
+                 self._enable_onnx_custom_ops_export()
+             else:
+                 self._substitute_fully_quantized_model()
+
+             if self._use_onnx_custom_quantizer_ops:
+                 Logger.info(f"Exporting onnx model with MCTQ quantizers: {self.save_model_path}")
+             else:
+                 Logger.info(f"Exporting fake-quant onnx model: {self.save_model_path}")
+
+             model_input = to_torch_tensor(next(self.repr_dataset())[0])
+
+             if hasattr(self.model, 'metadata'):
+                 onnx_bytes = BytesIO()
+                 torch.onnx.export(self.model,
+                                   model_input,
+                                   onnx_bytes,
+                                   opset_version=self._onnx_opset_version,
+                                   verbose=False,
+                                   input_names=['input'],
+                                   output_names=['output'],
+                                   dynamic_axes={'input': {0: 'batch_size'},
+                                                 'output': {0: 'batch_size'}})
+                 onnx_model = onnx.load_from_string(onnx_bytes.getvalue())
+                 onnx_model = add_onnx_metadata(onnx_model, self.model.metadata)
+                 onnx.save_model(onnx_model, self.save_model_path)
+             else:
+                 torch.onnx.export(self.model,
+                                   model_input,
+                                   self.save_model_path,
+                                   opset_version=self._onnx_opset_version,
+                                   verbose=False,
+                                   input_names=['input'],
+                                   output_names=['output'],
+                                   dynamic_axes={'input': {0: 'batch_size'},
+                                                 'output': {0: 'batch_size'}})
+
+         def _enable_onnx_custom_ops_export(self):
+             """
+             Enable the custom implementation forward in quantizers, so it is exported
+             with custom quantizers.
+             """
+
+             for n, m in self.model.named_modules():
+                 if isinstance(m, PytorchActivationQuantizationHolder):
+                     assert isinstance(m.activation_holder_quantizer, pytorch_quantizers.BasePyTorchInferableQuantizer)
+                     m.activation_holder_quantizer.enable_custom_impl()
+
+                 if isinstance(m, PytorchQuantizationWrapper):
+                     for wq in m.weights_quantizers.values():
+                         assert isinstance(wq, pytorch_quantizers.BasePyTorchInferableQuantizer)
+                         wq.enable_custom_impl()
+
+ else:
+     def FakelyQuantONNXPyTorchExporter(*args, **kwargs):
+         Logger.critical('Installing onnx is mandatory '
+                         'when using FakelyQuantONNXPyTorchExporter. '
+                         'Could not find onnx package.')  # pragma: no cover
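
Note on the hunk above: the exporter module now imports onnx and add_onnx_metadata only when the FOUND_ONNX flag from model_compression_toolkit.constants is set, and otherwise replaces FakelyQuantONNXPyTorchExporter with a stub that reports the missing package via Logger.critical. A minimal sketch of this optional-dependency pattern, assuming FOUND_ONNX is derived from an import probe (constants.py itself is unchanged in this diff and its definition is not shown):

# Sketch only: FOUND_ONNX's actual definition lives in constants.py, which this
# diff does not show; an import probe along these lines is the assumed mechanism.
import importlib.util

FOUND_ONNX = importlib.util.find_spec("onnx") is not None  # True iff onnx is installed

if FOUND_ONNX:
    import onnx  # guaranteed to succeed: the spec lookup above found the package

    def save_onnx(model_proto, path: str) -> None:
        # The real implementation exists only when the optional dependency is present.
        onnx.save_model(model_proto, path)
else:
    def save_onnx(*args, **kwargs):
        # Same public name either way; calling it without onnx installed reports the
        # missing package, mirroring the Logger.critical fallback in the diff above.
        raise ImportError("Installing onnx is mandatory when exporting to ONNX.")

This keeps the module importable on torch-only installs and defers the failure to the moment ONNX export is actually requested.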
@@ -14,20 +14,20 @@
  # ==============================================================================
  from typing import Callable

- from model_compression_toolkit.constants import FOUND_TORCH
+ from model_compression_toolkit.constants import FOUND_TORCH, FOUND_ONNX
  from model_compression_toolkit.exporter.model_exporter.fw_agonstic.quantization_format import QuantizationFormat
  from model_compression_toolkit.exporter.model_exporter.pytorch.export_serialization_format import \
      PytorchExportSerializationFormat
  from model_compression_toolkit.logger import Logger
- from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+
+
+ DEFAULT_ONNX_OPSET_VERSION = 15


  if FOUND_TORCH:
      import torch.nn
-     from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_onnx_pytorch_exporter import \
-         FakelyQuantONNXPyTorchExporter, DEFAULT_ONNX_OPSET_VERSION
-     from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_torchscript_pytorch_exporter import \
-         FakelyQuantTorchScriptPyTorchExporter
+     from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_onnx_pytorch_exporter import FakelyQuantONNXPyTorchExporter
+     from model_compression_toolkit.exporter.model_exporter.pytorch.fakely_quant_torchscript_pytorch_exporter import FakelyQuantTorchScriptPyTorchExporter
      from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable

      supported_serialization_quantization_export_dict = {
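
Taken together, pytorch_export_facade.py now owns DEFAULT_ONNX_OPSET_VERSION and passes an explicit opset down to FakelyQuantONNXPyTorchExporter, whose onnx_opset_version parameter became required. A usage sketch from the caller's side, assuming MCT's public pytorch_export_model entry point and its keyword names (the facade function body is outside this diff, so verify against the installed version):

# Usage sketch under stated assumptions: mct.exporter.pytorch_export_model is
# assumed to forward onnx_opset_version to FakelyQuantONNXPyTorchExporter.
import torch
import model_compression_toolkit as mct

def repr_dataset():
    # Representative dataset: each call yields a list holding one input batch.
    yield [torch.randn(1, 3, 224, 224)]

quantized_model = ...  # an exportable quantized torch.nn.Module produced by MCT PTQ/QAT

mct.exporter.pytorch_export_model(model=quantized_model,
                                  save_model_path='qmodel.onnx',
                                  repr_dataset=repr_dataset,
                                  onnx_opset_version=15)  # previously baked in as the exporter's default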