mct-nightly 1.8.0.20052023.post401-py3-none-any.whl → 1.8.0.20230610.post356-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/METADATA +10 -7
- {mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/RECORD +68 -115
- model_compression_toolkit/__init__.py +23 -3
- model_compression_toolkit/core/common/framework_info.py +1 -1
- model_compression_toolkit/core/keras/back2framework/instance_builder.py +16 -9
- model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +8 -34
- model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +5 -1
- model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +103 -28
- model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +39 -44
- model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +20 -18
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +3 -3
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +1 -1
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +36 -9
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +4 -4
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +24 -32
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +31 -8
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +5 -5
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +34 -8
- model_compression_toolkit/gptq/keras/gptq_training.py +15 -16
- model_compression_toolkit/gptq/keras/graph_info.py +2 -2
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +4 -5
- model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +5 -7
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +1 -1
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +6 -6
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +7 -7
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +6 -6
- model_compression_toolkit/gptq/pytorch/gptq_training.py +30 -10
- model_compression_toolkit/gptq/pytorch/graph_info.py +5 -2
- model_compression_toolkit/gptq/pytorch/quantization_facade.py +4 -2
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +4 -4
- model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +5 -7
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +1 -1
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +7 -7
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +7 -8
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +7 -8
- model_compression_toolkit/qat/common/__init__.py +2 -1
- model_compression_toolkit/qat/common/qat_config.py +2 -2
- model_compression_toolkit/qat/keras/quantization_facade.py +18 -8
- model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +1 -1
- model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +11 -11
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +11 -12
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +12 -13
- model_compression_toolkit/qat/pytorch/quantization_facade.py +27 -16
- model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +2 -2
- model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +31 -4
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +10 -9
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +11 -10
- model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +2 -1
- model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +1 -25
- model_compression_toolkit/{quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py → trainable_infrastructure/__init__.py} +3 -10
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/common/base_trainable_quantizer.py +3 -3
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/common/get_quantizer_config.py +1 -1
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/common/get_quantizers.py +3 -3
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/keras/base_keras_quantizer.py +4 -4
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/keras/config_serialization.py +2 -2
- model_compression_toolkit/{quantizers_infrastructure/inferable_infrastructure → trainable_infrastructure}/keras/load_model.py +16 -23
- model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/pytorch/base_pytorch_quantizer.py +3 -3
- model_compression_toolkit/quantizers_infrastructure/__init__.py +0 -23
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py +0 -87
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py +0 -46
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py +0 -31
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +0 -53
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/quant_utils.py +0 -49
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/activation_quantization_holder.py +0 -147
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py +0 -345
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizer_utils.py +0 -85
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py +0 -27
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +0 -148
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +0 -65
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +0 -86
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +0 -111
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py +0 -56
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +0 -79
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +0 -179
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +0 -67
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +0 -87
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +0 -163
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py +0 -66
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +0 -269
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizer_utils.py +0 -152
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py +0 -35
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +0 -96
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +0 -62
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +0 -83
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +0 -100
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py +0 -95
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py +0 -48
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py +0 -70
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py +0 -57
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py +0 -26
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +0 -77
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +0 -106
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +0 -66
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +0 -104
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +0 -109
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/__init__.py +0 -14
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/__init__.py +0 -14
- {mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/LICENSE.md +0 -0
- {mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/WHEEL +0 -0
- {mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/top_level.txt +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/inferable_infrastructure → trainable_infrastructure/common}/__init__.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure → trainable_infrastructure/common}/constants.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/common/quant_utils.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/common/trainable_quantizer_config.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/inferable_infrastructure/common → trainable_infrastructure/keras}/__init__.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/trainable_infrastructure → trainable_infrastructure}/keras/quantizer_utils.py +0 -0
- /model_compression_toolkit/{quantizers_infrastructure/inferable_infrastructure/keras → trainable_infrastructure/pytorch}/__init__.py +0 -0
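The file list above amounts to a package reorganization: the trainable-quantizer modules under model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure were promoted to model_compression_toolkit/trainable_infrastructure, while the inferable_infrastructure modules were removed from this wheel entirely. A hedged sketch of the resulting import-path change, using only module paths visible in the RECORD entries (public class names are not shown in this diff):

```python
# Old module path (1.8.0.20052023.post401) -- removed in this release:
# import model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.keras.base_keras_quantizer

# New module path (1.8.0.20230610.post356):
import model_compression_toolkit.trainable_infrastructure.keras.base_keras_quantizer
```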
{mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.8.0.20052023.post401
+Version: 1.8.0.20230610.post356
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -23,6 +23,7 @@ Requires-Dist: PuLP
 Requires-Dist: matplotlib
 Requires-Dist: scipy
 Requires-Dist: protobuf
+Requires-Dist: mct-quantizers-nightly
 
 # Model Compression Toolkit (MCT)
 
@@ -89,15 +90,17 @@ For more details, we highly recommend visiting our project website where experim
 
 This section provides a quick starting guide. We begin with installation via source code or pip server. Then, we provide a short usage example.
 
-###
-
-
-
-#### From Source
+### Setting up work environment
+Clone the repository and install the required packages (via [requirements](requirements.txt)).
 ```
 git clone https://github.com/sony/model_optimization.git
-
+cd model_optimization
+pip install -r requirements.txt
 ```
+
+### Installation
+See the MCT install guide for the pip package.
+
 #### From PyPi - latest stable release
 ```
 pip install model-compression-toolkit
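Aside from the version bump, the functional change in METADATA is the new runtime dependency on mct-quantizers-nightly, which presumably hosts the inferable-quantizer code removed from this wheel (see the RECORD diff below). A minimal, hedged way to confirm the dependency resolves in an environment, using only the distribution name shown above:

```python
# Hedged sketch: check that the newly declared dependency is installed.
# Only the distribution name "mct-quantizers-nightly" is taken from the diff above.
from importlib.metadata import version, PackageNotFoundError

try:
    print("mct-quantizers-nightly:", version("mct-quantizers-nightly"))
except PackageNotFoundError:
    print("mct-quantizers-nightly is not installed")
```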
{mct_nightly-1.8.0.20052023.post401.dist-info → mct_nightly-1.8.0.20230610.post356.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-model_compression_toolkit/__init__.py,sha256=
+model_compression_toolkit/__init__.py,sha256=q5Yg5hY1LHrHUMA0oAP2YZK_7FpEoPY2pyqnjHbHgCI,3608
 model_compression_toolkit/constants.py,sha256=9AIjCQuTlfS4M45Jw1r_KK4u--18DhvVNMK-DZDof7w,3949
 model_compression_toolkit/logger.py,sha256=b9DVktZ-LymFcRxv2aL_sdiE6S2sSrFGWltx6dgEuUY,4863
 model_compression_toolkit/core/__init__.py,sha256=qnBA6aaojI7RpEQZU2vXWiELHfVJf-MnAP-4T0tcFDY,2008
@@ -10,7 +10,7 @@ model_compression_toolkit/core/common/base_substitutions.py,sha256=xDFSmVVs_iFSZ
 model_compression_toolkit/core/common/data_loader.py,sha256=7YF5Mqz64Xb4rVwY3knrdIZ4JEHybXxiQqx0deR_c5k,4017
 model_compression_toolkit/core/common/defaultdict.py,sha256=n-F3dP-VTMnGy9KfCwp7D_WBlvFxe3waX4LpnOX8FH0,2281
 model_compression_toolkit/core/common/framework_implementation.py,sha256=spFDFM31jH1Cz8t4why1LvL1TOIAJAIltNvzz2T5tNI,22391
-model_compression_toolkit/core/common/framework_info.py,sha256=
+model_compression_toolkit/core/common/framework_info.py,sha256=hwmstv7IuBRfa6IxDbeG4y-7AxKx4bwCyI_Exi2C7mo,6424
 model_compression_toolkit/core/common/memory_computation.py,sha256=ixoSpV5ZYZGyzhre3kQcvR2sNA8KBsPZ3lgbkDnw9Cs,1205
 model_compression_toolkit/core/common/model_builder_mode.py,sha256=jll9-59OPaE3ug7Y9-lLyV99_FoNHxkGZMgcm0Vkpss,1324
 model_compression_toolkit/core/common/model_collector.py,sha256=pNmJsU7QPCQ8-YUrzz__85YwF7Mk4Q27gozDSYCpzrg,5005
@@ -134,8 +134,8 @@ model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=BauH-Ssoiuv5wu81f
 model_compression_toolkit/core/keras/back2framework/__init__.py,sha256=rhIiXg_nBgUZ-baE3M6SzCuQbcnq4iebY1jtJBvKHOM,808
 model_compression_toolkit/core/keras/back2framework/factory_model_builder.py,sha256=GSh1Piz5qpA7IlvHTMqUvPn7WBDa0IHEDZdd_TzY9XA,2226
 model_compression_toolkit/core/keras/back2framework/float_model_builder.py,sha256=9SFHhX-JnkB8PvYIIHRYlReBDI_RkZY9LditzW_ElLk,2444
-model_compression_toolkit/core/keras/back2framework/instance_builder.py,sha256=
-model_compression_toolkit/core/keras/back2framework/keras_model_builder.py,sha256=
+model_compression_toolkit/core/keras/back2framework/instance_builder.py,sha256=DzfVbu4T4argPB1093ZGAk16GgjKcbuufukXtymF30A,4078
+model_compression_toolkit/core/keras/back2framework/keras_model_builder.py,sha256=8EWpPCDVAruF1G0hE29qfW-FHA6jWazFosJcPSVCrEg,16452
 model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py,sha256=6I4AIR4WUZ84vm1tjFef-VKNIk5qnAxPc1H_VVqzjbk,7150
 model_compression_toolkit/core/keras/back2framework/model_gradients.py,sha256=E_VHcRXkgaQv_pJbqNs1L68fzkplDZNEk_qdgWsFdp4,15316
 model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py,sha256=5wFb4nx_F0Wu4c8pLf6n6OzxOHtpOJ6_3mQsNSXIudU,2481
@@ -192,10 +192,10 @@ model_compression_toolkit/core/pytorch/utils.py,sha256=rBQMAbWluyIMjVfeghzq6FZv3
 model_compression_toolkit/core/pytorch/back2framework/__init__.py,sha256=H_WixgN0elVWf3exgGYsi58imPoYDj5eYPeh6x4yfug,813
 model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py,sha256=aSmU5MKbqZLylZbaZDlUWPa_jfJoaRhqz_6v_zeLc7o,2274
 model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py,sha256=tLrlUyYhxVKVjkad1ZAtbRra0HedB3iVfIkZ_dYnQ-4,3419
-model_compression_toolkit/core/pytorch/back2framework/instance_builder.py,sha256=
+model_compression_toolkit/core/pytorch/back2framework/instance_builder.py,sha256=B67awxkYKpjmNUcEgzgsav59Dken0t360ozpCd6BQVA,1848
 model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py,sha256=BWNYDNA7-y1aVR6w6ECFwgF0NEl-W4OvCWrmHYqeQI4,5157
 model_compression_toolkit/core/pytorch/back2framework/model_gradients.py,sha256=f3vXyJ-b4Xo_oczxK0rT0Rnkyo7hVIOtfvDPR5iK9-Y,18214
-model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py,sha256=
+model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py,sha256=Jag1zAEtrhGldPrzMRN3V8g-5-MMBqY_so49Vc1DuS0,16642
 model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py,sha256=MHP8SWXEqT4a5pWOELJZhqS_orYa8zWiKkOliczfrNc,3709
 model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py,sha256=q2JDw10NKng50ee2i9faGzWZ-IydnR2aOMGSn9RoZmc,5773
@@ -236,28 +236,28 @@ model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py,sha256
 model_compression_toolkit/exporter/model_exporter/keras/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
 model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py,sha256=93UkXZEm6sTCATUwLu4exVyIFAFBzbA4Yg_Tr89Rb9U,1495
 model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py,sha256=HnRVLGc7dyQvtDxUNuo9gU3huuQQTB5hFRUGfvzI5AY,797
-model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py,sha256=
-model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=
-model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py,sha256=
+model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py,sha256=OmVgzRugXKQ9WItEDxWSxsSC0PEWdaa3DcFec7FVWlM,5550
+model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=sgTlqNjHSf0vVzQpq2811WL6k7SIo_QZaV7TxUtIC8I,3043
+model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py,sha256=RR-GPsMvEBHmkJEXCIA-bW_ULUcDeSGG9h0gX4YZ2Vw,8144
 model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py,sha256=fuJEwMo6r2Y3_vBMCSXtMbJ-Y98R4uE6WNRJJNG3kYc,6057
 model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
 model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py,sha256=ahgT4EQDSrOyzEBfKqWCNJWsH6RCJOCSRxzlsqaObYA,1600
 model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py,sha256=el2QRZTz2jLGj7x0eWQuzyVW6u6xXSNV-mKoEtneTLY,800
-model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=
-model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=
+model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=drAJc0nBGqYXpWdMDRjbkGR8CdkO1QZ-amMMgOpxidE,3923
+model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=QUQZ60OkzbaElE94EFJVH9qiHHU7V8SC3RiJoVPeT0s,2843
 model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=rzUd377eyHHo0dD6z0efTTR-8Bo6Ebqo0X8ECL-Q7Gw,5866
 model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
 model_compression_toolkit/exporter/model_wrapper/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=ihcMbqi_UGYnDZNnTS3XouKF7dmrrBGIZbfFEzW6KXE,3543
 model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=
-model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=iuo76cqmoHpF9eAc3Sqz4W-i6nnY1eeySBOdzh8bY5g,4287
+model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=prOosEwrTEUsg4gvnZwgyLtDu2id-eMsZ97pEHHBGwM,8318
 model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizers.py,sha256=n7VTA-a9TrLFpfdYAqrAKj6PGlAyLq8-xdwnMMpX71k,2077
 model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
-model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=gvX5ILs5vjQ_F_dq5KaFs0GOQEq9gYXO5a6YZlYY8h4,3449
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=
-model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=SJ5fetbUMkmB0tkHkmVhMrLksh7eqMQJLFuMD08ZKWM,3921
+model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=gNURwKHO5C3fez_SPZ9lxfp7FamN5A6W6Jp4AaGQJBE,7582
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizers.py,sha256=hinP-wtyxZyoW860GdJAk6M3iPjmwwPXQTUxd56yhq8,2086
 model_compression_toolkit/gptq/__init__.py,sha256=tPxlcYl8JwK-EWVTy5IVgGOaUJsnG-6PnOKeYNeGJjQ,1250
 model_compression_toolkit/gptq/runner.py,sha256=vWd7cWKgTGc9oPcTtwTQZoI3MArCx19Y61uteLFCxVo,5534
@@ -270,37 +270,37 @@ model_compression_toolkit/gptq/common/gptq_training.py,sha256=U24sNWiVzXEfnk4ePO
 model_compression_toolkit/gptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/gptq/keras/gptq_keras_implementation.py,sha256=axBwnCSjq5xk-xGymOwSOqjp39It-CVtGcCTRTf0E_4,1248
 model_compression_toolkit/gptq/keras/gptq_loss.py,sha256=rbRkF15MYd6nq4G49kcjb_dPTa-XNq9cTkrb93mXawo,6241
-model_compression_toolkit/gptq/keras/gptq_training.py,sha256=
-model_compression_toolkit/gptq/keras/graph_info.py,sha256=
+model_compression_toolkit/gptq/keras/gptq_training.py,sha256=49HM4zj5BMlTzEtFGLdEqk0upvuKTYlvCMx9okzNyNk,17319
+model_compression_toolkit/gptq/keras/graph_info.py,sha256=B9wMdlnUNHaFog6UsE3bEZrcB6j1ySNYaU4o_kI91H0,4564
 model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=bb_Rakbw2PQjK9R_ocxSHZy-lGjXBVXJ-Kht-lTKLsA,14583
 model_compression_toolkit/gptq/keras/quantizer/__init__.py,sha256=-DK1CDXvlsnEbki4lukZLpl6Xrbo91_jcqxXlG5Eg6Q,963
-model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=
+model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=RWmsUXCw051shsPZ6igkSJBzqp7r4ddW1zYzZd3g0Xs,4751
 model_compression_toolkit/gptq/keras/quantizer/quant_utils.py,sha256=Vt7Qb8i4JsE4sFtcjpfM4FTXTtfV1t6SwfoNH8a_Iaw,5055
-model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=
+model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=fh5CNTs0S47txLn8pWJfnif4CJEb1PsQbYFGBWhOp1Q,4136
 model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py,sha256=iKzHnxl2ZSEp09oatfJVoiDuu6Q_iN36mOxQzDr1cy8,2087
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=
-model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=
-model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=
+model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=t9-CQZE9AgnQ_Lq4SPd5uemvNcbtUHnU0qTHnx-QxZc,3962
+model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=fOEN27K5SWZV1-NOrOXkFqOMtU8FgDCw5l0Il2VtERQ,11891
+model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=ctjaKNPjGAt-SiU4QhL9Aewkz7KP2VlUxGxzyyJpym8,10381
 model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=
+model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=dLyCe8kTEWTUrorRBJs0RUmWddwELqToTyyx2mLT8_8,8370
 model_compression_toolkit/gptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=kDuWw-6zh17wZpYWh4Xa94rpoodf82DksgjQCnL7nBc,2719
 model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py,sha256=tECPTavxn8EEwgLaP2zvxdJH6Vg9jC0YOIMJ7857Sdc,1268
-model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=
-model_compression_toolkit/gptq/pytorch/graph_info.py,sha256
-model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=
+model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=pcFj5rywszU264jTJVM3X1_qN5BEcxMzCj1-I6DKoHY,14668
+model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=-0GDC2cr-XXS7cTFTnDflJivGN7VaPnzVPsxCE-vZNU,3955
+model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=sZvbP4RJMMbiwDz0QMVj0h_FKrG-R6SO7hsznpZQLwM,12554
 model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=ZHNHo1yzye44m9_ht4UUZfTpK01RiVR3Tr74-vtnOGI,968
-model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=
+model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=Zb-P0yRyZHHBlDvUBdRwxDpdduEJyJp6OT9pfKFF5ks,4171
 model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=OocYYRqvl7rZ37QT0hTzfJnWGiNCPskg7cziTlR7TRk,3893
-model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=
+model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=PHbfJf7qdqWMmTGxxdGGoGFsQhhSqTELa6Sv3jeS9sQ,3996
 model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py,sha256=9owTzSu_xz29dsjONB-AYXuCZoPo_4nqxTk3yH18a0g,2089
 model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
-model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=
-model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=
-model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=oO7WgsAHMnWoXNm_gTKAAe-Nd79mGL_m677ai-ui424,4132
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=_WiXyXmx-nb8uD55CQDh405YHD_6dxLGsF1aQcDw1pU,12036
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=I4Nw3k_UbU6IZdtlZ7PgUsDrJK0wxtdY6mTQsWBwtyc,9103
 model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
-model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256
+model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=-s9pegYOCsW1UXOn7pIi4vRY5m2Kq46sbYh43gsC55k,8782
 model_compression_toolkit/legacy/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
 model_compression_toolkit/legacy/keras_quantization_facade.py,sha256=GMVq3pYW3oWx6LYtO8xPBVU3wpzYHJ0gvdUANc8E584,17682
 model_compression_toolkit/legacy/pytorch_quantization_facade.py,sha256=ngRtDGOj672ezuSo05OrI4fewz7oMXIgkpLIC7qJLZs,17223
@@ -311,95 +311,33 @@ model_compression_toolkit/ptq/keras/quantization_facade.py,sha256=ZW-Eofgmdqrtyk
 model_compression_toolkit/ptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/ptq/pytorch/quantization_facade.py,sha256=2KZKp8UBPbAgYAiw-vISLRGhH4BJPuUCMh8wsueL2pg,8375
 model_compression_toolkit/qat/__init__.py,sha256=BYKgH1NwB9fqF1TszULQ5tDfLI-GqgZV5sao-lDN9EM,1091
-model_compression_toolkit/qat/common/__init__.py,sha256=
-model_compression_toolkit/qat/common/qat_config.py,sha256=
+model_compression_toolkit/qat/common/__init__.py,sha256=6tLZ4R4pYP6QVztLVQC_jik2nES3l4uhML0qUxZrezk,829
+model_compression_toolkit/qat/common/qat_config.py,sha256=zMbyAmIQ5S5fmFHA8OjFy2CppT7ZdXxApXgNJJ8EELc,3293
 model_compression_toolkit/qat/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/qat/keras/quantization_facade.py,sha256=
+model_compression_toolkit/qat/keras/quantization_facade.py,sha256=a3p8i8hdW28lAsuC9BeFTYLwWboA7JXxNyuU8KxVMcY,16043
 model_compression_toolkit/qat/keras/quantizer/__init__.py,sha256=fWDCj0kIWttYn0sV04Uwih-RAe0vJfz71cxYWeX9qWA,856
-model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py,sha256=
+model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py,sha256=ABhft_begf8lAlxxV3IxykHs8qYVfWCoNE4VEN4i_1c,2138
 model_compression_toolkit/qat/keras/quantizer/quant_utils.py,sha256=rS2z_ozyjzQ07MMczaAFNZ7K6RKwAnBOKyRac4UvF44,2123
-model_compression_toolkit/qat/keras/quantizer/quantization_builder.py,sha256=
+model_compression_toolkit/qat/keras/quantizer/quantization_builder.py,sha256=mZwghAnKagL7916CJycFHgJdD5aY6At1A_IBkmYqae4,5635
 model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=
-model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=
+model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=GJmGjMbzn3lN9pGzdFpuHyhJSNbwStR06hHrDVMA-H4,13436
+model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=BOIkOUb1WG9MsnzNrr41N9pP0kYWL7qa4kW5ZguVWdo,10708
 model_compression_toolkit/qat/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/qat/pytorch/quantization_facade.py,sha256=
+model_compression_toolkit/qat/pytorch/quantization_facade.py,sha256=9s0u1L9GyWzf2SSHDyzpsavPVWplb52fk_UJKpGKnN0,12377
 model_compression_toolkit/qat/pytorch/quantizer/__init__.py,sha256=R4vwVcbg6QprCTNzibyF9PtbKKKBsfu9ffypKDNscJQ,859
-model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py,sha256=
-model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py,sha256=
+model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py,sha256=FnhuFCuQoSf78FM1z1UZgXXd3k-mKSM7i9dYOuJUmeA,2213
+model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py,sha256=GOYRDXvQSGe_iUFVmvDy5BqC952hu_-rQO06n8QCyw0,5491
 model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py,sha256=5XswoF-5aaFangkHssWKAQTsk6lf_zzndzfCsBWBVMs,5004
 model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
-model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=
-model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=
-model_compression_toolkit/quantizers_infrastructure/__init__.py,sha256=_NfdFxpFw21mocnBN2nR4otYSvZ9zNnQGnzJpXXJIiU,1764
-model_compression_toolkit/quantizers_infrastructure/constants.py,sha256=HN120boJxAnEXNrLSj-o_s-VX4o6C-1ap_KZ4840sd0,875
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py,sha256=T1AJCHt2DozYLojpX3u0r6PfJlzQzbuYr80fP9oB9H4,3158
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py,sha256=FRoAiEOR5zh-_onKwTWbH9wRpiBWqOK-TdyyoV1DFtA,1913
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py,sha256=6KqemyFcM4a6KoCZ-6dm46iIZ_kusPnj5crH8RTAvuo,1213
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py,sha256=-oQkpnMYzv2P1p2C6ULuCLq6CK7yXOm8qPzpiYO3LuQ,2979
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/quant_utils.py,sha256=SDzQuh3q9ugSD80Z9IuaWOPskH5VsRRyuBOeIeWJDdQ,2153
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/activation_quantization_holder.py,sha256=KUbiNoMaC-gMrgt_L0kXXsFrQbtc4m30fXG2GV7mv5I,5867
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py,sha256=nUrK7WX2Oi3N8IzN1W1vJI_TEd442dc0-Ggi1ttL11E,4353
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py,sha256=CxIMtDYZjCdhzp6zq2y6lhbmkhZ7y4-MIGMAVSkrvH4,14622
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizer_utils.py,sha256=Z0SoHkTl5dC0y3hrcj0bC5fSa-oU7IYuGN5sBTb4THA,3440
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py,sha256=BQI6wYwieNSJyUJMgETa028vmqYAchxwZneeZGrwakA,2937
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py,sha256=Xg2t9VH61BIvqYtJg6GBKkAnDmFe1u4K0i0r2OBAK-I,2742
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py,sha256=wqM2oGRoZ9IO-NtrEId7KhMHfdB_0yLXj2MOzjGlDlc,2216
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py,sha256=ItUEs8c7LVxBPMopLD5BO2Ry9DIxFIrk_M7AdSEyBFg,979
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=NcYCx0Iy-3uWw2taiiws6nIvk2xDbv5qoshKNI0yS4A,7478
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=sk78WMZe3RMl_5HsZwPHImLyAElDKGQeHt1dltThaTs,3184
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py,sha256=3XYwmH3uPaBDFAz9a8_usa6CYlFfxp40eSG6r2sFdcM,3966
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py,sha256=e19l4ldW93f_DvUOAN15bXuEkEIfuSRoyY2fVMqO89U,5241
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py,sha256=V0B_LAgk2nQGlcRJY2LqxEbVIOrGZyOdaX0t6fMoJLo,4202
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py,sha256=INUo-hIS9GCBgkEVTRVPxRlA71aBlgzcqUF2S8k7Oi8,9464
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py,sha256=AjOOdA1Ebe6YuhziauJG6_f7Pmq4Cg_gPbo0a1q424s,3479
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py,sha256=qTtAqd7ndfLZHPiwM1St9hQ03fesLGfWRe6B6IxCW-g,4381
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=pMXijWxu_4MSZu0O2qMJS04DfgQBDu55yJKdNkXHO4c,8669
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py,sha256=m7ar9v3FW_O4pCvFwh4rOku_tiwyfAHwOHmjmBNT70k,11248
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizer_utils.py,sha256=2KN976TZTObiaEhoUL0-Rpceui-Nifw5LdKLdU7SRY0,5929
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py,sha256=-hiXng1pF3wjI-YYYZqZ-NZ1TStGuec4bci3jxvYVY0,2820
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py,sha256=degWqM_rXiLorirgigGkpWKXhJUembhOBMFzMGQXbtM,4876
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py,sha256=2Kmmafr601njD0zICBzOfmIUtdBDHDK7-06phwKBSPc,1907
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py,sha256=Ppvihr5ahqFv7E-lzEP9yylH7CRRQTf9S-50tsOem3E,3075
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py,sha256=gfoVvpvZlPdEjV68YLs0RINb0fYZPx1Loahv-M0cGss,2506
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py,sha256=9bxo6snEJkRv5XWmhBGsV6g8LCe_1NgAE5ufIq2ewYU,1007
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=MBCNIfiBSgnBhOPTv6Vu8mXgzBmycDpNJ0roTCXHnTw,4563
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=zoj54sAIyzJ1pHKdKroxTfWPEOaqQzq4Bk-XRc0L4IM,2933
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py,sha256=jVI_Nn1IreIT1j81jqxIaTAsCue26lmYzbZMpdPJ3V4,3636
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py,sha256=X-76l77Af57VSrOXnnCmLQY9FhCMoPW3cjJBEWnjVTg,4860
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py,sha256=cibtTfgdlaq33cZLor9DHuphmlNz9STcYSc77V-sjMQ,3977
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py,sha256=45W3lKvFlLi5U8J82HHeK6pkeVyvLSy3_vdwU7zpskU,5186
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py,sha256=PJkWUiG2KusYn_OZZVkbYy1juKrl7txiLb_2Wch8KLs,3152
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py,sha256=lwHyby6x50DuFZJkAVChrb7dlESincOGRHwHZO-z_M8,5019
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=NDQHtw-6IFIcbA8cAPx4K3RsyyF_Q1Lmw-Xxupy6b2E,5387
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=u4F4BvT-SmEOw8ZtsF32hcMvs3qWgLK4mWFNP0hKMA0,7714
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py,sha256=iG7hfmJkHZshQk5_x5NieUeOpGX0P0iXhaMISr9GDRU,6377
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py,sha256=2kJyzVI4OllWMEXRyy9oKqtLSr7l7sI2QATgt8K2zs4,3719
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/quant_utils.py,sha256=zdiew1jwR7tUKm9XWlHnAPxIZsAdKqbzzC2vH02j5wA,1505
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/trainable_quantizer_config.py,sha256=My5Wz34jPOyh8z33OTpKnOobRB0cpO_Qgmtsd5lizHo,4791
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=1vGWP73ydXMAuLELJb4zo_ZIxrJeGfCccIiD3ylLRag,4290
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/config_serialization.py,sha256=mAyWYG5sXuUi8freOA3NC9GvDUaOycz2kPDTnpvvNPA,4079
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
-model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=JXLKUihyZ9e27BlTIq2vkl2pKP1pFlcRKJT-cW1QjL8,3137
+model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=C7jsd4nU13jIf6o4t9RmZH4buPBVNCD7jHUCxbxZ_mk,9629
+model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=UjdPi30P25lpYMqKP3St9PwQwPIws7_sAyupZHhBKEY,8651
 model_compression_toolkit/target_platform_capabilities/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/target_platform_capabilities/constants.py,sha256=mYUESjXeN7EPomZKgqHZE031KNOO4wmFdRB8gA4m50U,920
 model_compression_toolkit/target_platform_capabilities/immutable.py,sha256=rSPd3Xpx4Rzt6-EwDA9tXvKygrrt4xvmq01JVCXY0hQ,1723
-model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py,sha256=
+model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py,sha256=_LzyDupsTDiJvIsVA-L-M_fRrW8ePcul8mr60L8DW9g,1574
 model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py,sha256=5Bu5MkOYYDGzZgTu-PBQ4xVCnso1mtssc9zz1pZjl7o,2010
 model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py,sha256=NIKUE2AtRv4CFOhpwjVvfG3rLfvd6p7DYBSuK0SKo4s,2353
-model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py,sha256=
+model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py,sha256=Nh04rizl8D_qu_DvRacvmDwY9UjI53Jbcfjk1Rq6nD4,8538
 model_compression_toolkit/target_platform_capabilities/target_platform/operators.py,sha256=rRmrmPBY4rxCWVpEc6FxeOPUFh8MkfwgQsqD82U9a7w,3108
 model_compression_toolkit/target_platform_capabilities/target_platform/quantization_format.py,sha256=3UIZtGTV0WX3dbfiIMUFWID5W68vtKfiVoPWUbpQFzM,787
 model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py,sha256=dFauUrY7BejPDVX8HcSotoHKcT7S9kk65jgzZdPis2E,9206
@@ -465,8 +403,23 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/
 model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py,sha256=u8KAytDvkXAbgqpVEKynSqag3Wrc7TAtCP9Ru9Y0hR8,8048
 model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py,sha256=EQ69-jzEUjSHITrX7-f3lIWsRsVt-QWPlVs03PsVxGE,6131
 model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py,sha256=d31s294euI4q0AH814qYiV-j8nYtQVIDR7zhhSfjRYw,5002
-
-
-
-
-
+model_compression_toolkit/trainable_infrastructure/__init__.py,sha256=O0J24JDzwdo-p9yf1oVtDnK1ZoZ1K8iPAp7WJ40bcTQ,1104
+model_compression_toolkit/trainable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
+model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=ON5X6a4p46ofXzRcXyIgOGSgO7JXG85frE9vTjOZu2o,7564
+model_compression_toolkit/trainable_infrastructure/common/constants.py,sha256=HN120boJxAnEXNrLSj-o_s-VX4o6C-1ap_KZ4840sd0,875
+model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py,sha256=3_CfuMjUa6ttDwersLGbMRZ6h5KHRfGC1qAPM-5yQtw,6351
+model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py,sha256=fUpe1bjuhKiJRsXT1oVfpqml_bzfZo6G0uVkrB2lTEI,3558
+model_compression_toolkit/trainable_infrastructure/common/quant_utils.py,sha256=zdiew1jwR7tUKm9XWlHnAPxIZsAdKqbzzC2vH02j5wA,1505
+model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py,sha256=My5Wz34jPOyh8z33OTpKnOobRB0cpO_Qgmtsd5lizHo,4791
+model_compression_toolkit/trainable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
+model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=5bme26QavYSur9iOzLc6XokWeymO50s5GEnmEs11DhE,4211
+model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py,sha256=PpyM0u8fsvv5xw5Gmt-3Z1UFATX5KLJzLbsXaquuyuk,3991
+model_compression_toolkit/trainable_infrastructure/keras/load_model.py,sha256=VqIkh-DHxtR1mkqncsfPUIuITR4m9S9eAWep5H-xsvc,3480
+model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
+model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
+model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=gmXTw94ylCZ2V3lVh16EZxHVyKqAaM0YlJlPTINiQrA,3084
+mct_nightly-1.8.0.20230610.post356.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-1.8.0.20230610.post356.dist-info/METADATA,sha256=uvZkxcaEieIAaD9RHVQKdF5mSCg3dC4RbJsrKDN9GSo,11394
+mct_nightly-1.8.0.20230610.post356.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+mct_nightly-1.8.0.20230610.post356.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-1.8.0.20230610.post356.dist-info/RECORD,,
@@ -13,19 +13,39 @@
 # limitations under the License.
 # ==============================================================================
 
-
 from model_compression_toolkit.target_platform_capabilities import target_platform
 from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities
 from model_compression_toolkit import core
 from model_compression_toolkit.logger import set_log_folder
 from model_compression_toolkit.legacy.keras_quantization_facade import keras_post_training_quantization, keras_post_training_quantization_mixed_precision
 from model_compression_toolkit.legacy.pytorch_quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
-from model_compression_toolkit import
+from model_compression_toolkit import trainable_infrastructure
 from model_compression_toolkit import ptq
 from model_compression_toolkit import qat
 from model_compression_toolkit import exporter
 from model_compression_toolkit import gptq
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
+
 
+# Old API (will not be accessible in future releases)
+from model_compression_toolkit.core.common import network_editors as network_editor
+from model_compression_toolkit.core.common.quantization import quantization_config
+from model_compression_toolkit.core.common.mixed_precision import mixed_precision_quantization_config
+from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
+from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, QuantizationErrorMethod, DEFAULTCONFIG
+from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
+from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
+from model_compression_toolkit.logger import set_log_folder
+from model_compression_toolkit.core.common.data_loader import FolderImageLoader
+from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
+from model_compression_toolkit.core.common.defaultdict import DefaultDict
+from model_compression_toolkit.legacy.keras_quantization_facade import keras_post_training_quantization, keras_post_training_quantization_mixed_precision
+from model_compression_toolkit.legacy.pytorch_quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
+from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data
+from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data
+from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
+from model_compression_toolkit.gptq.common.gptq_config import RoundingType
+from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
+from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config
 
 __version__ = "1.8.0"
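Note on the package `__init__` hunk above: the new top-level surface adds the `trainable_infrastructure` package and re-exports `keras_load_quantized_model`, while the imports grouped under "# Old API (will not be accessible in future releases)" keep the pre-existing entry points importable for now. A minimal usage sketch, not part of the diff; the file path is a hypothetical placeholder for a model previously saved by MCT's exporter:

import model_compression_toolkit as mct

# keras_load_quantized_model is now re-exported at the package top level.
# 'quantized_model.h5' is an illustrative path, not a file shipped with the package.
quantized_model = mct.keras_load_quantized_model('quantized_model.h5')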
@@ -22,7 +22,7 @@ from typing import Dict, Any, List
 
 from model_compression_toolkit.core.common.defaultdict import DefaultDict
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform
+from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 
 
 class ChannelAxis(Enum):
@@ -31,14 +31,22 @@ class OperationHandler:
     Class to handle conversions from graph nodes to Keras operators and retrieving them.
     """
 
-    def __init__(self, graph: Graph):
+    def __init__(self, graph: Graph, wrapper: Callable = None):
+        """
+
+        Args:
+            graph: Graph to build its layers based on its nodes.
+            wrapper: Wrapper to use for wrapping the layers.
+        """
+
         # hold nodes after sorting them
         self.node_sort = list(topological_sort(graph))
 
         self.layer_to_node_dict = {}
 
         # hold dictionary from node to its equivalent Keras layer
-        self.node_to_fw_op_dict = instance_builder(self.node_sort
+        self.node_to_fw_op_dict = instance_builder(self.node_sort,
+                                                   wrapper)
 
     def get_node_op_function(self, n: BaseNode) -> Layer:
         """
@@ -74,12 +82,7 @@ def node_builder(n: common.BaseNode) -> Layer:
         Keras layer that was built from the node.
     """
     framework_attr = copy.copy(n.framework_attr)
-
-    # replace input node with identity, so can wrap it with QuantizationWrapper
-    _layer_class = Layer # Identity
-    framework_attr = {}
-    else:
-        _layer_class = n.layer_class
+    _layer_class = n.layer_class
     framework_attr[LAYER_NAME] = n.name # Overwrite framework name to identical graph node name
     node_instance = _layer_class.from_config(framework_attr) # Build layer from node's configuration.
     with tf.name_scope(n.name):
@@ -90,13 +93,15 @@ def node_builder(n: common.BaseNode) -> Layer:
     return node_instance
 
 
-def instance_builder(toposort: List[BaseNode]
+def instance_builder(toposort: List[BaseNode],
+                     wrapper: Callable = None) -> Dict[BaseNode, Layer]:
     """
     Build a dictionary of nodes to their corresponding Keras
    layers, given a list of nodes.
 
    Args:
        toposort: List of nodes sorted topological to build their layers.
+        wrapper: Wrapper to use for wrapping the layers.
 
    Returns:
        A dictionary of nodes to their corresponding Keras layers.
@@ -106,6 +111,8 @@ def instance_builder(toposort: List[BaseNode]) -> Dict[BaseNode, Layer]:
     for n in toposort:
         if not n.reuse: # Hold a single node in dictionary for all reused nodes from the same layer.
             keras_node = node_builder(n)
+            if wrapper is not None:
+                keras_node = wrapper(n, keras_node)
             nodes_dict.update({n: keras_node})
 
     return nodes_dict
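Note on the `wrapper` argument threaded through `OperationHandler` and `instance_builder` above: any callable taking `(node, layer)` and returning a Keras layer can be supplied, and it is applied once per freshly built layer (reused nodes are only built once). A hedged sketch of such a callable, not part of the diff; the logging behaviour is purely illustrative:

def logging_wrapper(node, layer):
    # Return the layer unchanged, after recording which graph node produced it.
    print(f'Built Keras layer for node: {node.name}')
    return layer

# e.g. OperationHandler(graph, wrapper=logging_wrapper)
#  or  instance_builder(toposort, logging_wrapper)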
@@ -21,7 +21,7 @@ from packaging import version
 from model_compression_toolkit.constants import INPUT_BASE_NAME
 from model_compression_toolkit.core.common.back2framework.base_model_builder import BaseModelBuilder
 from model_compression_toolkit.core.common.user_info import UserInformation
-from
+from mct_quantizers import KerasActivationQuantizationHolder
 
 # As from Tensorflow 2.6, keras is a separate package and some classes should be imported differently.
 if version.parse(tf.__version__) < version.parse("2.6"):
@@ -113,7 +113,8 @@ class KerasModelBuilder(BaseModelBuilder):
                          return_float_outputs)
 
         # Build an OperationHandler to handle conversions from graph nodes to Keras operators.
-        self.oh = OperationHandler(self.graph
+        self.oh = OperationHandler(self.graph,
+                                   wrapper=wrapper)
         self.wrapper = wrapper
         self.get_activation_quantizer_holder = get_activation_quantizer_holder_fn
 
@@ -121,7 +122,7 @@ class KerasModelBuilder(BaseModelBuilder):
     def use_activation_holder_during_model_building(self) -> bool:
         """
 
-        Returns: Whether the model builder uses
+        Returns: Whether the model builder uses KerasActivationQuantizationHolder during
         model building (by adding it as a layer when converting the graph to the Keras model)
         or not. If so - the model builder expects the activation quantizers to not be wrapped
         in KerasQuantizeWrapper that was received in its init.
@@ -172,11 +173,6 @@ class KerasModelBuilder(BaseModelBuilder):
                                         for
                                         inode in self.graph.get_inputs()}
 
-        # Support adding Layer after input layers require us to store it in layer_to_node_dict
-        # dict offline (unlike other layers which stored during running).
-        for node, layer in self.oh.node_to_fw_op_dict.items():
-            if node.type == InputLayer:
-                self.oh.layer_to_node_dict[layer] = node
 
         # Build a list of the model's input tensors. Switching from a dictionary to a list
         # to keep the tensors input order, since inputs in Graph are ordered by their indices.
@@ -186,7 +182,8 @@ class KerasModelBuilder(BaseModelBuilder):
 
         # Build a dictionary from node to its output tensors, by applying the layers sequentially.
         for n in self.oh.node_sort:
-            op_func = self.oh.get_node_op_function(n)
+            op_func = self.oh.get_node_op_function(n)  # Get node operation function
+
             input_tensors = self._build_input_tensors_list(n,
                                                            node_to_output_tensors_dict) # Fetch Node inputs
             out_tensors_of_n, out_tensors_of_n_float = self._run_operation(n, # Run node operation and fetch outputs
@@ -223,22 +220,6 @@ class KerasModelBuilder(BaseModelBuilder):
         # Build the model.
         model = tf.keras.Model(inputs=inputs_list, outputs=model_output_tensors)
 
-        if self.wrapper is not None:
-            def _wrap(layer):
-                _node = self.oh.layer_to_node_dict.get(layer)
-                if _node is not None:
-                    return self.wrapper(_node,
-                                        layer)
-
-                elif is_layer_fake_quant(layer) or isinstance(layer, ActivationQuantizationHolder):
-                    return layer
-
-                raise Exception( # pragma: no cover
-                    f'Mismatch between keras model and graph cant find node named: '
-                    f'{get_node_name_from_layer(layer)}')
-
-            model = clone_model(model, clone_function=_wrap)
-
         return model, self.graph.user_info
 
     def _convert_node2name(self, in_node_to_output_tensors_dict):
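Note on the removal above: layer wrapping is no longer done as a post-pass that clones the finished model; instead the `wrapper(node, layer)` callable is applied while `OperationHandler` builds each layer (see the `instance_builder` change earlier), which also drops the `layer_to_node_dict` bookkeeping kept only for that lookup. For comparison, a hedged sketch of the old-style mechanism using standard Keras APIs only; the pass-through `clone_function` below is illustrative and not MCT code:

import tensorflow as tf

def passthrough(layer):
    # Old approach: clone_function receives only the layer, so the builder had to
    # keep a layer -> node dictionary to recover the matching graph node.
    return layer

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
cloned = tf.keras.models.clone_model(model, clone_function=passthrough)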
@@ -295,8 +276,7 @@ class KerasModelBuilder(BaseModelBuilder):
         if len(input_tensors) == 0: # Placeholder handling
             out_tensors_of_n_float = input_nodes_to_input_tensors[n]
             out_tensors_of_n = self._run_operation_activation_quantization(n,
-                                                                            out_tensors_of_n_float
-                                                                            op_func)
+                                                                            out_tensors_of_n_float)
         else:
             input_tensors = [tensor for tensor_list in input_tensors for tensor in tensor_list] # flat list of lists
             # Build a functional node using its args
@@ -331,24 +311,18 @@ class KerasModelBuilder(BaseModelBuilder):
 
     def _run_operation_activation_quantization(self,
                                                node: BaseNode,
-                                               node_outputs: List[TFReference]
-                                               identity_layer: Layer = None):
+                                               node_outputs: List[TFReference]):
         """
         Quantize node's activations
 
         Args:
             node: Node to quantize its activations
             node_outputs: Output tensors of the float node.
-            identity_layer: Identity layer (should be passed only when quantizing input layers)
 
         Returns:
             Quantized node's outputs.
         """
         if self.wrapper is not None:
-            # If identity layer was passed, use it for inference
-            # (needed since wrapping an Input layer can not be wrapped)
-            if identity_layer is not None:
-                node_outputs = identity_layer(node_outputs)
 
             # In case the activation quantizer is attached out of the wrapper we use get_activation_quantizer_holder
             # for the activation quantization holder (if the node's activations are quantized)
@@ -38,12 +38,16 @@ def node_builder(n: BaseNode) -> Module:
     return node_instance
 
 
-
+# todo: remove. It is not used anymore
+def identity_wrapper(node: BaseNode,
+                     module: Module,
+                     include_activation_quantizers: bool):
     """
     A function which takes a computational graph node and a pytorch module and return an identity wrapping which return the layer itself
     Args:
         node: A node of mct graph.
         layer: A pytorch module
+        include_activation_quantizers: bool flag.
     Returns: pytorch module
     """
     return module