mct-nightly 1.11.0.20240401.428__py3-none-any.whl → 2.0.0.20240403.423__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/METADATA +5 -5
- {mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/RECORD +8 -8
- model_compression_toolkit/__init__.py +1 -1
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +1 -1
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +1 -1
- {mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/LICENSE.md +0 -0
- {mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/WHEEL +0 -0
- {mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/top_level.txt +0 -0
{mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version:
+Version: 2.0.0.20240403.423
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN

@@ -103,8 +103,8 @@ You can customize data generation configurations to suit your specific needs. [G
 
 ### Quantization
 MCT supports different quantization methods:
-* Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/
-* Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/
+* Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_post_training_quantization.html)
+* Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_gradient_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_gradient_post_training_quantization.html)
 * Quantization-aware training (QAT) [*](#experimental-features)
 
 

@@ -147,8 +147,8 @@ taking into account the target platform's Single Instruction, Multiple Data (SIM
 By pruning groups of channels (SIMD groups), our approach not only reduces model size
 and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
 for a target Resource Utilization of weights memory footprint.
-[Keras API](https://sony.github.io/model_optimization/docs/api/
-[Pytorch API](https://github.
+[Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_pruning_experimental.html)
+[Pytorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_pruning_experimental.html)
 
 #### Experimental features
 
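For orientation on the APIs the updated README links above document, here is a minimal usage sketch of Keras post-training quantization. The `mct.ptq.keras_post_training_quantization` name follows the doc page linked in the diff; the random representative-data generator and the exact signature shown here are illustrative assumptions and may differ between MCT releases.

```python
# Minimal PTQ sketch (illustrative only, not taken from this package's docs).
import numpy as np
import model_compression_toolkit as mct
from tensorflow.keras.applications import MobileNetV2

model = MobileNetV2()

def representative_data_gen():
    # Yield batches resembling the real input distribution; random data is
    # used here only to keep the sketch self-contained.
    for _ in range(10):
        yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

quantized_model, quantization_info = mct.ptq.keras_post_training_quantization(
    model, representative_data_gen)
```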
{mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/RECORD RENAMED

@@ -1,4 +1,4 @@
-model_compression_toolkit/__init__.py,sha256=
+model_compression_toolkit/__init__.py,sha256=rr-lU1cO0RuS_7QNw41uShlq2s0ILvvtATqaUT-NKzo,1573
 model_compression_toolkit/constants.py,sha256=KW_HUEPmQEYqCvWGyORqkYxpvO7w5LViB5J5D-pm_6o,3648
 model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
 model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567

@@ -320,12 +320,12 @@ model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quant
 model_compression_toolkit/exporter/model_wrapper/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=YffgbVYJG5LKeIsW84Pi7NqzQcvJMeQRnAKQCCmIL6c,3776
 model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=NBDzg2rX5BcVELtExHxS5wi0HFxwpGrEedB4ZPSVMas,5130
 model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=uL6tJWC4s2IWUy8GJVwtMWpwZZioRRztfKyPJHo14xI,9442
 model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
 model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=uTQcnzvP44CgPO0twsUdiMmTBE_Td6ZdQtz5U0GZuPI,3464
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=T3QNZl0JFRAm62Z66quHPx0iNHgXwyfSpoBgbqJBBnY,4915
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=4sN5z-6BXrTE5Dp2FX_jKO9ty5iZ2r4RM7XvXtDVLSI,9348
 model_compression_toolkit/gptq/__init__.py,sha256=YKg-tMj9D4Yd0xW9VRD5EN1J5JrmlRbNEF2fOSgodqA,1228
 model_compression_toolkit/gptq/runner.py,sha256=MIg-oBtR1nbHkexySdCJD_XfjRoHSknLotmGBMuD5qM,5924

@@ -471,8 +471,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=7bbzqJN8ZAycVDvZr_5xC-niTAR5df8f03Kooev_pfg,3047
-mct_nightly-
-mct_nightly-
-mct_nightly-
-mct_nightly-
-mct_nightly-
+mct_nightly-2.0.0.20240403.423.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-2.0.0.20240403.423.dist-info/METADATA,sha256=dHTEKAHtl1x-dI2jaHQiDYi7QJInTciTrVMfjF2DzxQ,18795
+mct_nightly-2.0.0.20240403.423.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+mct_nightly-2.0.0.20240403.423.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-2.0.0.20240403.423.dist-info/RECORD,,
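The RECORD changes above are the expected hash and size updates for the files that changed. For reference, each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing `=` padding stripped. A small sketch for recomputing such a digest is shown below; the example path simply assumes an unpacked wheel in the current directory.

```python
# Sketch: recompute a wheel RECORD-style digest for a file and compare it to the
# recorded value (e.g. rr-lU1cO0RuS_7QNw41uShlq2s0ILvvtATqaUT-NKzo for __init__.py).
import base64
import hashlib

def record_digest(path: str) -> str:
    # RECORD digests are urlsafe-base64 SHA-256 values with '=' padding removed.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Example usage (assumed path):
# print(record_digest("model_compression_toolkit/__init__.py"))
```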
model_compression_toolkit/__init__.py CHANGED

@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "
+__version__ = "2.0.0.20240403.000423"
model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py CHANGED

@@ -92,7 +92,7 @@ if FOUND_TF:
 
 Logger.info("Please run your accuracy evaluation on the exported quantized model to verify it's accuracy.\n"
 "Checkout the FAQ and Troubleshooting pages for resolving common issues and improving the quantized model accuracy:\n"
-"FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md"
+"FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md\n"
 "Quantization Troubleshooting: https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md")
 return exportable_model, user_info
 else:
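The only change in this file adds a trailing `\n` to the FAQ string. Because Python concatenates adjacent string literals verbatim, the previous code printed the FAQ and Troubleshooting URLs as one run-on line. The sketch below (with shortened, illustrative strings) shows the effect; the PyTorch builder diff that follows applies the same fix.

```python
# Adjacent string literals are joined with no separator, so a missing "\n"
# runs the two URLs together in the logged message.
before = ("FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md"
          "Quantization Troubleshooting: ...")
after = ("FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md\n"
         "Quantization Troubleshooting: ...")
print(before)  # one run-on line
print(after)   # two separate lines
```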
model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py CHANGED

@@ -84,7 +84,7 @@ if FOUND_TORCH:
 
 Logger.info("Please run your accuracy evaluation on the exported quantized model to verify it's accuracy.\n"
 "Checkout the FAQ and Troubleshooting pages for resolving common issues and improving the quantized model accuracy:\n"
-"FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md"
+"FAQ: https://github.com/sony/model_optimization/tree/main/FAQ.md\n"
 "Quantization Troubleshooting: https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md")
 
 return exportable_model, user_info
{mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/LICENSE.md RENAMED
File without changes

{mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/WHEEL RENAMED
File without changes

{mct_nightly-1.11.0.20240401.428.dist-info → mct_nightly-2.0.0.20240403.423.dist-info}/top_level.txt RENAMED
File without changes