mct-nightly 1.8.0.4032023.post406-py3-none-any.whl → 1.8.0.4042023.post409-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/METADATA +7 -7
- {mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/RECORD +63 -59
- {mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/WHEEL +1 -1
- model_compression_toolkit/__init__.py +9 -15
- model_compression_toolkit/core/common/logger.py +10 -2
- model_compression_toolkit/core/keras/back2framework/model_gradients.py +3 -2
- model_compression_toolkit/core/keras/quantization_facade.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/model_gradients.py +13 -6
- model_compression_toolkit/core/pytorch/constants.py +4 -0
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +16 -2
- model_compression_toolkit/exporter/__init__.py +5 -0
- model_compression_toolkit/exporter/model_exporter/__init__.py +0 -3
- model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py +1 -1
- model_compression_toolkit/exporter/model_wrapper/__init__.py +4 -8
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +45 -39
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +39 -24
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +50 -42
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +43 -36
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +24 -5
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +25 -18
- model_compression_toolkit/gptq/__init__.py +6 -0
- model_compression_toolkit/gptq/common/gptq_config.py +57 -104
- model_compression_toolkit/gptq/common/gptq_constants.py +0 -7
- model_compression_toolkit/gptq/common/gptq_training.py +28 -38
- model_compression_toolkit/gptq/keras/gptq_training.py +10 -28
- model_compression_toolkit/gptq/keras/graph_info.py +8 -33
- model_compression_toolkit/gptq/keras/quantization_facade.py +6 -12
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -1
- model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +2 -2
- model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +45 -0
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +112 -0
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +38 -135
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +11 -41
- model_compression_toolkit/gptq/pytorch/gptq_training.py +9 -24
- model_compression_toolkit/gptq/pytorch/graph_info.py +7 -27
- model_compression_toolkit/gptq/pytorch/quantization_facade.py +9 -22
- model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +1 -0
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -20
- model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +10 -1
- model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +2 -2
- model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +45 -0
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +115 -0
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +30 -117
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +196 -0
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +9 -31
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +30 -37
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +27 -36
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +21 -21
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +25 -26
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py +1 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py +4 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py +1 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +13 -3
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py +6 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py +3 -0
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +53 -2
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +2 -1
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +22 -4
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +24 -3
- model_compression_toolkit/gptq/common/gptq_quantizer_config.py +0 -93
- {mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/LICENSE.md +0 -0
- {mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/top_level.txt +0 -0
- /model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/{common → pytorch/quantizers/activation_inferable_quantizers}/activation_lut_pot_inferable_quantizer.py +0 -0
{mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/METADATA (RENAMED)

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 1.8.0.4032023.post406
+ Version: 1.8.0.4042023.post409
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -49,8 +49,8 @@ MCT is developed by researchers and engineers working at Sony Semiconductor Isra
  ## Supported Features

  MCT supports different quantization methods:
- * Post
- * Gradient-based post
+ * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
+ * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
  * Quantization aware training (QAT)[*](#experimental-features)

@@ -107,15 +107,15 @@ A nightly package is also available (unstable):
  pip install mct-nightly
  ```

- ###
+ ### Requirements

- To run MCT, one of the supported frameworks,
+ To run MCT, one of the supported frameworks, Tensorflow/Pytorch, needs to be installed.

- For
+ For use with Tensorflow please install the packages:
  [tensorflow](https://www.tensorflow.org/install),
  [tensorflow-model-optimization](https://www.tensorflow.org/model_optimization/guide/install)

- For
+ For use with PyTorch please install the packages:
  [torch](https://pytorch.org/)

  Also, a [requirements](requirements.txt) file can be used to set up your environment.
{mct_nightly-1.8.0.4032023.post406.dist-info → mct_nightly-1.8.0.4042023.post409.dist-info}/RECORD (RENAMED)

@@ -1,4 +1,4 @@
- model_compression_toolkit/__init__.py,sha256=
+ model_compression_toolkit/__init__.py,sha256=oB0z1Vm7LRYpyc2-nZxeTLSddf3OSbIpXXTbL-bzVuk,3534
  model_compression_toolkit/core/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/core/analyzer.py,sha256=etuO_VPRkHRlCKvfpQYGe56j8psu4QuXTumneLunj9g,2943
  model_compression_toolkit/core/exporter.py,sha256=U_-ea-zYHsnIt2ydameMLZ_gzDaCMI1dRa5IjA8RUuc,4233
@@ -11,7 +11,7 @@ model_compression_toolkit/core/common/defaultdict.py,sha256=n-F3dP-VTMnGy9KfCwp7
  model_compression_toolkit/core/common/framework_implementation.py,sha256=cW2P4G1Dq4SiQtATWWq3QjfIB9EbRFiWM43aV1ftZjw,22674
  model_compression_toolkit/core/common/framework_info.py,sha256=2mrdVpyTvDgrae8Wf_iAGI81vPlMCp9EwIbNw-Ywbfs,6430
  model_compression_toolkit/core/common/immutable.py,sha256=rSPd3Xpx4Rzt6-EwDA9tXvKygrrt4xvmq01JVCXY0hQ,1723
- model_compression_toolkit/core/common/logger.py,sha256=
+ model_compression_toolkit/core/common/logger.py,sha256=b9DVktZ-LymFcRxv2aL_sdiE6S2sSrFGWltx6dgEuUY,4863
  model_compression_toolkit/core/common/memory_computation.py,sha256=fYMU4fiCLlQRN4UIkoabuOvdwNHw4-lS1eNTuCVsBbQ,1217
  model_compression_toolkit/core/common/model_builder_mode.py,sha256=jll9-59OPaE3ug7Y9-lLyV99_FoNHxkGZMgcm0Vkpss,1324
  model_compression_toolkit/core/common/model_collector.py,sha256=9k1814EvteXo4WLaERIjU4KBRG_7WTjcQpAMPiCjAr4,5012
@@ -145,7 +145,7 @@ model_compression_toolkit/core/keras/keras_implementation.py,sha256=YJ5EAUbbcryj
  model_compression_toolkit/core/keras/keras_model_validation.py,sha256=IzlEriDsmTjZeWYIluIOeXNVlhTmaO-UGuFE7PBRG1o,1717
  model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=WGuyYfm3C2FScrI5WO-xPVeK_A9hJ-PyM4rb-CotsIs,3936
  model_compression_toolkit/core/keras/kpi_data_facade.py,sha256=xLYtLs-j0WiRtUFF_U7P3eH8xLR3606EabwtdklaGeg,8881
- model_compression_toolkit/core/keras/quantization_facade.py,sha256=
+ model_compression_toolkit/core/keras/quantization_facade.py,sha256=dluWNMmbUICeASvsSbTfPzmkqgVfuHzhwFyLIT_gLRQ,18032
  model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=BauH-Ssoiuv5wu81fk1bm9HO_0yR0oTwKyLkXgE3qSE,2022
  model_compression_toolkit/core/keras/back2framework/__init__.py,sha256=rhIiXg_nBgUZ-baE3M6SzCuQbcnq4iebY1jtJBvKHOM,808
  model_compression_toolkit/core/keras/back2framework/factory_model_builder.py,sha256=MtfhDEou_7OWwFBgJIICxSUB0Flb18P1yM6nST5S9Xs,2231
@@ -153,7 +153,7 @@ model_compression_toolkit/core/keras/back2framework/float_model_builder.py,sha25
  model_compression_toolkit/core/keras/back2framework/instance_builder.py,sha256=V_7jxsA8jbBRNMVVazWeXNDhXeQFBkJORlM0TS5FN4Y,3837
  model_compression_toolkit/core/keras/back2framework/keras_model_builder.py,sha256=vvjdbKxAphIwdwAPcMitaDcfkp7BZSKXmZ9plmNO5Uo,15658
  model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py,sha256=3xeqOajVoL8V508BttX8ZOGyLLnhWLI0TOost-ae5fU,7162
- model_compression_toolkit/core/keras/back2framework/model_gradients.py,sha256=
+ model_compression_toolkit/core/keras/back2framework/model_gradients.py,sha256=a4qLD0pICSt6NyMtpC3-rsCxSVGDWdrUab0IfLtg9nw,15340
  model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py,sha256=Qhhhc-JHx8BRupy5yZHU3XauJn9woDmmG8EmpcVZdAQ,2476
  model_compression_toolkit/core/keras/graph_substitutions/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
  model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
@@ -199,7 +199,7 @@ model_compression_toolkit/core/keras/statistics_correction/__init__.py,sha256=9H
  model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py,sha256=ZUUUxCkGUKRsdud0kHeosEhDg5MejDOa1NirRHbQYes,3055
  model_compression_toolkit/core/keras/visualization/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
  model_compression_toolkit/core/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/core/pytorch/constants.py,sha256=
+ model_compression_toolkit/core/pytorch/constants.py,sha256=XC9uCV7zbkP47m0YoLla6VJCL_DBTmMqGe7iXLa-qes,2626
  model_compression_toolkit/core/pytorch/default_framework_info.py,sha256=lBNRSrY8LDJA2Oxk4qiVolUdeNIiDyUK1ek7-2ykc7Y,4219
  model_compression_toolkit/core/pytorch/kpi_data_facade.py,sha256=NtL-bRGF_0UWDUBlZt8mgdmvARYRwHSyWp3t3Y8Kq-U,8782
  model_compression_toolkit/core/pytorch/pytorch_implementation.py,sha256=3ENp_Hr5KN5OiLBqMzsag4AlAfZDywEDVOKCrKNkH-I,25965
@@ -211,7 +211,7 @@ model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py,s
  model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py,sha256=GJWxQh8Lqshfi3vCqyAw0XmyAiDDe9q4YPQsQe9qXNQ,3414
  model_compression_toolkit/core/pytorch/back2framework/instance_builder.py,sha256=OkGH_MkrffaxADCbw75WKPtqVRQSi6AxOeNWoNgzOq0,1680
  model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py,sha256=Lx0w7vK4SmoxaVUeA1orLiQ7JbOknwDUs0kAHJJygrc,5152
- model_compression_toolkit/core/pytorch/back2framework/model_gradients.py,sha256=
+ model_compression_toolkit/core/pytorch/back2framework/model_gradients.py,sha256=sGEbv3TjzidMqxBRuQnCtAL2icV6CvNO2JHwTe5FAkg,18238
  model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py,sha256=DJ-NM2rhaRQ1qCX-ySq5CVSOaKDKbElo5DmjS_Se3Es,12685
  model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py,sha256=Cbee-M10q5LaLo43zvndWwPcxvY4wfBiwpaVsI0Y0ng,3704
  model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
@@ -227,7 +227,7 @@ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py,sha256=b6T6rA_6RZtaCtmcXn7tC04Bs7XvzkSE68Xq-YNZOP8,38365
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py,sha256=EMCviyFyJFLEKuAUz3rZHLfB9MAU1kywSBL2XQNzLlg,1953
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py,sha256=Gxy_WuFyRhtmw6bSeuhoGwriiu7vcL4bvOTAMWT5SNs,5563
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py,sha256=
+ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py,sha256=u0bcymbcrO7XVRqcBwUqqNDq7PIjlndLp2OS8v0jxNo,4153
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py,sha256=GCMXOWnXFQhgGoBUkdiZu08x33E4iJYq8mLlWwxt4vw,2911
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py,sha256=XFtU9yuBmoZlX0f0mS6otMPWMk-RcWs94XdvvTNhW8Y,3303
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py,sha256=EgBe7ij5LDD2i6yt320aeMl4AoJIAyOeKYg4MOsq7es,9833
@@ -300,8 +300,8 @@ model_compression_toolkit/core/tpc_models/tflite_tpc/v1/__init__.py,sha256=t4JKs
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tp_model.py,sha256=3u8pxc6lgbOb_nT6BSSfoOML5LFdE0abXS0s83AZAHI,7770
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_keras.py,sha256=kEpZOfjUA5-qzdhuw4DSMNlMJdYjN05rWMQfo5rtM3I,6066
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_pytorch.py,sha256=TN51UfSK6lOQ_JRoXNzzglGpetrcYltLeg98Tz9mpC8,4937
- model_compression_toolkit/exporter/__init__.py,sha256=
- model_compression_toolkit/exporter/model_exporter/__init__.py,sha256=
+ model_compression_toolkit/exporter/__init__.py,sha256=Ic52ZgHIPuAWsgWX7LuzA9TkRPo_flpQizJAc39ttrc,1083
+ model_compression_toolkit/exporter/model_exporter/__init__.py,sha256=9HIBmj8ROdCA-yvkpA8EcN6RHJe_2vEpLLW_gxOJtak,698
  model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py,sha256=iwZZEs_1AwLvClepYG38P_oTrQrA2YXxFTQUNMVoyS4,2022
  model_compression_toolkit/exporter/model_exporter/keras/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
@@ -314,56 +314,60 @@ model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pyto
  model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=T3K43YiDGa2g14SbZeDRqhr-3kFYVOyR6EZoSlIlLyc,2892
  model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=iB6o2arjkQIiFcXl8oVoTowmEu0WXBbPjrcZQLjVWbQ,3925
  model_compression_toolkit/exporter/model_exporter/tflite/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py,sha256
+ model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py,sha256=59d__yulPJhD2_dIT2EiFWlw5qMfCLq0NwZarXOOHFU,3074
  model_compression_toolkit/exporter/model_exporter/tflite/int8_tflite_exporter.py,sha256=K_eHtLZ9jTUvEiLDL4uX-X_Q6HLwaIB--uUdNMvJFsE,8169
  model_compression_toolkit/exporter/model_exporter/tflite/tflite_export_facade.py,sha256=MFBd6DwO92Ub58uEXKcw6VQbtr40StokXjpRUA4XV-U,3338
- model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=
+ model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
  model_compression_toolkit/exporter/model_wrapper/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=
+ model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=QVSVD3ayI431id5e80BgBDELpUW5rYsHagR6tC2BcGo,3882
  model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=
+ model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=qhs7renKRKc5S-ad2ctiF-GeIhkpFIlDsL3Co0i9ViI,2988
+ model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=84jpx_10f3DYFU4svDOHReykrYVj-MXcthxIQW4mwTk,8596
  model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizers.py,sha256=n7VTA-a9TrLFpfdYAqrAKj6PGlAyLq8-xdwnMMpX71k,2077
  model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256
+ model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=-DbO4K3F7awXzCiIKi6vVWgiRqPBe3CaPiVGGfn4iQ4,2036
  model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=
+ model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=XrAqmyUuC1EtoyR010Quy9qre-60RsvnTbqK7dhwU38,2496
+ model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=BlbojVGO4QG0_f7kjdGkGGESQzsXX0AgmJAf6KHHbrU,7674
  model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizers.py,sha256=hinP-wtyxZyoW860GdJAk6M3iPjmwwPXQTUxd56yhq8,2086
- model_compression_toolkit/gptq/__init__.py,sha256=
+ model_compression_toolkit/gptq/__init__.py,sha256=tPxlcYl8JwK-EWVTy5IVgGOaUJsnG-6PnOKeYNeGJjQ,1250
  model_compression_toolkit/gptq/runner.py,sha256=rB11-U68ZcR_IosgMYNegwyO5mGRjMMgL9OdHEIZOGU,5484
  model_compression_toolkit/gptq/common/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/gptq/common/gptq_config.py,sha256=
- model_compression_toolkit/gptq/common/gptq_constants.py,sha256=
+ model_compression_toolkit/gptq/common/gptq_config.py,sha256=35Ywz9IKWl5iB_oe-aO5dyhNtXXxSm_8gZAlA8wUQng,9507
+ model_compression_toolkit/gptq/common/gptq_constants.py,sha256=QSm6laLkIV0LYmU0BLtmKp3Fi3SqDfbncFQWOGA1cGU,611
  model_compression_toolkit/gptq/common/gptq_graph.py,sha256=tXWLUtP52OLgC3WO9M9kaG2OYVDE9imY9L4ef16lAqY,2826
- model_compression_toolkit/gptq/common/
- model_compression_toolkit/gptq/common/gptq_training.py,sha256=DCEVjHXoLPEVzmnGDvzQvPaTSDfog1O1Hq_0Ziuusc4,15250
+ model_compression_toolkit/gptq/common/gptq_training.py,sha256=5gTLlvP5NBT-zY7QxW5LmarMFUaHCTL01KZdbvl_-lA,15106
  model_compression_toolkit/gptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/gptq/keras/gptq_loss.py,sha256=rbRkF15MYd6nq4G49kcjb_dPTa-XNq9cTkrb93mXawo,6241
- model_compression_toolkit/gptq/keras/gptq_training.py,sha256=
- model_compression_toolkit/gptq/keras/graph_info.py,sha256=
- model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=
+ model_compression_toolkit/gptq/keras/gptq_training.py,sha256=d0M2RkiRvKROZlc3GCr4_SHRSWXNc3RU081K1GF4a0M,15891
+ model_compression_toolkit/gptq/keras/graph_info.py,sha256=nYKL3hrd6L3EiyxejrE1xJUeppxSmflHO4nt2fkE-aY,4399
+ model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=tNL3_0NdsFUqtCQ9_Qjy4_hYfikXmZZGXdytl-lz8tQ,14112
  model_compression_toolkit/gptq/keras/quantizer/__init__.py,sha256=H76G9W-tYSpHBnqRRgIUoWOjhjKN7XN00njHgjBT_JA,872
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=
+ model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=bEIgXk17VzyxDGHBqHVg_Ox9PV8I7zd_39Qkt90XzbQ,4782
  model_compression_toolkit/gptq/keras/quantizer/quant_utils.py,sha256=XT1idm84wl4GDGoqGI8L5XH-H9OjIlhTjGCjY-ylYQw,4604
- model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=
+ model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=Ngqk6rAfS5tWwF2DkpVE_u69Q1Kf15aaSuZ37bwOpBs,4392
+ model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py,sha256=iKzHnxl2ZSEp09oatfJVoiDuu6Q_iN36mOxQzDr1cy8,2087
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/
+ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=HqvDvrQKRYvjvjEl4p4a_r0lrMSkO3gAUM1KV4EV5Js,3976
+ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=m33BS3VYGwOK-ovh_SBwOlg9lmsyKiB6U23WjWYLVY0,12060
  model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=
+ model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=2xjOHUFsy8A0kgmhw5AiXnofN8eSb_gzyLnrGT50Hm8,8539
  model_compression_toolkit/gptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=kDuWw-6zh17wZpYWh4Xa94rpoodf82DksgjQCnL7nBc,2719
- model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=
- model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=
- model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=
- model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=
- model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=
- model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=
+ model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=EaPX7MfaNlzFHogrxN14-G9zGPyt8Bpgya7O0WaUkgk,13516
+ model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=Sphpr5wKADgwZ-sLxNqMAcsEiP_jaFEL7q2-zcrtUx8,3791
+ model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=LPYUDDY393M9sk0iWiWXsIGjuGYPYiUPqiGnfCzmtrY,12468
+ model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=ZHNHo1yzye44m9_ht4UUZfTpK01RiVR3Tr74-vtnOGI,968
+ model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=IArGYcTb_c7aDnZOOlLGCuuZcV4A0DaxoYO3i-fbVNM,4291
+ model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=rKRb6TgNko_NhZnwUA2xzwvXAM5qj_mWNjhy_h2SwI8,3888
+ model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=qprTfTkqqcAijNKsHwKsOlju75Ihu_PDEJxny_A5AD0,4221
+ model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py,sha256=9owTzSu_xz29dsjONB-AYXuCZoPo_4nqxTk3yH18a0g,2089
  model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=EGs3GfC73yl7E1xlDd_sHS7Vof1Td7RAq_Ny7hAbZZA,4178
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=dQ4tqk49V-nED6GZRx2iGqpnObKTatJq60FfcpfiJUg,12180
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=5CmooZOZhIt9pTsgYUOSrJAoEz6YxOr0QtVXP3pZfuw,9191
  model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=
+ model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=T1z8KfcwwnWmq6Rh0Ncyx1JiaeTxKBQAkxVsI4yD4J4,8932
  model_compression_toolkit/ptq/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/ptq/runner.py,sha256=_c1dSjlPPpsx59Vbg1buhG9bZq__OORz1VlPkwjJzoc,2552
  model_compression_toolkit/ptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
@@ -381,8 +385,8 @@ model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py,sha256
  model_compression_toolkit/qat/keras/quantizer/quant_utils.py,sha256=rS2z_ozyjzQ07MMczaAFNZ7K6RKwAnBOKyRac4UvF44,2123
  model_compression_toolkit/qat/keras/quantizer/quantization_builder.py,sha256=ESYtJGA6SGT0103Q1r33VGTu60V05pux7fK8JOnRau0,4229
  model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=
+ model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=Cc1ohHnWOlWnjllAxEA2ZrLZqPiXt_oH6YfQWkEhPhI,13599
+ model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=XGvg0OPQdi1WvYZJ7GA9VUBKGTWIt_VKLzdPoXfw7xQ,10939
  model_compression_toolkit/qat/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/qat/pytorch/quantization_facade.py,sha256=8iH6ENPyVbPFzpJS2FaXPvjZ1VzkSgGWsb-EnHywjRA,11875
  model_compression_toolkit/qat/pytorch/quantizer/__init__.py,sha256=R4vwVcbg6QprCTNzibyF9PtbKKKBsfu9ffypKDNscJQ,859
@@ -390,25 +394,24 @@ model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py,sh
  model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py,sha256=V-oF596RdXW8W8nF8g57bEGsvB8ORvRIFoyrXBwyaWc,4086
  model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py,sha256=5XswoF-5aaFangkHssWKAQTsk6lf_zzndzfCsBWBVMs,5004
  model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=
+ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=qTVB979OYn7yPGfD319h_E4IgoP_o52EdWw3SSiiFis,9766
+ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=_9gD5F-lAh4w7BkkiH4EO8VU_40_ySClXjLXf-7zPek,8810
  model_compression_toolkit/quantizers_infrastructure/__init__.py,sha256=wKRMA6H4aTPnQlvmx_gbe4d2KMZ-40QIGDJCZa-2VCU,1615
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/activation_lut_pot_inferable_quantizer.py,sha256=GH14y6rV3OKbfJTdv8NPee4tHwWPuIbuhwejkchMnn8,4599
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py,sha256=Zo2N4lAo1Lz91MSor0s-dng3ueFP6UpZlt9CcoCu5SM,3141
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py,sha256=tYmi5_PpsSJ-LyOcMslU0FEnG_c31VXyujc1_R7-EVk,1664
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py,sha256=
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py,sha256=6KqemyFcM4a6KoCZ-6dm46iIZ_kusPnj5crH8RTAvuo,1213
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py,sha256=8GCjwp665nXcUZEv2RBeDT2MMZ1YvvORaxAkXooWJy0,2967
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/quant_utils.py,sha256=SDzQuh3q9ugSD80Z9IuaWOPskH5VsRRyuBOeIeWJDdQ,2153
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py,sha256=H0-I_-4ChTLNk7gwcnft5YV-q9Cg1wue0OUuZ1W4fek,4279
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py,sha256=uE05T6VbLScwpyZoAXoJsCC_aYauv0LjyxSm89yZu84,14646
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizer_utils.py,sha256=Z0SoHkTl5dC0y3hrcj0bC5fSa-oU7IYuGN5sBTb4THA,3440
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py,sha256=0heI5awgysKl9-XhIWxPiCbC_IBC7mANzBP95Tu1G9o,2942
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py,sha256=Xg2t9VH61BIvqYtJg6GBKkAnDmFe1u4K0i0r2OBAK-I,2742
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py,sha256=3ILK53o5RogFy1ZI2gyaJk6TjH2HV77j5uSqkSc_8W0,2228
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py,sha256=ItUEs8c7LVxBPMopLD5BO2Ry9DIxFIrk_M7AdSEyBFg,979
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=aUiK_aOy8YImLyXT6_g3BIQ6Lt9FdPPPYjj1R93qsvc,7485
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=z-t_WU9OGURVpG3e6uBQ8Mx4JPhOxqS0Tpg_ioa0MhA,3191
@@ -421,15 +424,16 @@ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/ker
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py,sha256=pfoZCZNKsovKq3XiSim5ErSBid3ihK9WORaZh7i8wIg,4376
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=PWl58Dpoxowz2HFg7IpT74A5o3_GjFKOsmonK2Nk0Uo,8664
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py,sha256=AXqeWmB570xucOdjUt5P1LbrRIkprdj26hW1q7YHjtE,11272
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizer_utils.py,sha256=2KN976TZTObiaEhoUL0-Rpceui-Nifw5LdKLdU7SRY0,5929
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py,sha256
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py,sha256=-hiXng1pF3wjI-YYYZqZ-NZ1TStGuec4bci3jxvYVY0,2820
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py,sha256=knIkFr8xnWFyBu8VT129eH8_Mj0Osb2jIZx32zQjgLY,4871
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py,sha256=jZ8q-vzgsUCXv32OYpZySlCC5GeA07x6NVO5q5K4TsE,1919
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py,sha256=gIKmFouy2KAJt7cbRZ1lmSitPtFNNkCgqgLJrnn9gRQ,3070
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py,sha256=G6ReERM4j9sGuRKuOPXXo-H1WMEMX7_OgQzxAZP0aaE,2501
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py,sha256=9bxo6snEJkRv5XWmhBGsV6g8LCe_1NgAE5ufIq2ewYU,1007
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=GH14y6rV3OKbfJTdv8NPee4tHwWPuIbuhwejkchMnn8,4599
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=mu17Qv9I4bzd3IAac7zFg-Goy6uQaNZlEplVRmZBArY,2928
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py,sha256=qV2F9wVUAAT0rn9STDN5gbQPbwh9EsAH8pQog-V_scg,3631
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py,sha256=qSAo-pAPWnY_-DO-84-xJYUKrc9kih0AUo3KCyre0BY,4855
@@ -441,19 +445,19 @@ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pyt
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=fbXuZBlVCGjNKp3hlIp3W9NM-dtzP75c19wkvjbNCMo,5394
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=qnGMQfwttYetjeJJcdKsXJKpcb75Sy_HTS13Oorfgvo,7775
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py,sha256=uQPnn1tkD96EmTcg-zAnxKH5XzY5y9zYb6tJ9ZTm_oI,6333
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py,sha256=RRtxOjiB1gFMiqYMlUC7hqZMdJGN5FFMBj7-sD2aWJ8,3831
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/quant_utils.py,sha256=zdiew1jwR7tUKm9XWlHnAPxIZsAdKqbzzC2vH02j5wA,1505
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/trainable_quantizer_config.py,sha256=WBTYdDQtXkscjO8b7leunBVSGG8JC__DhhpINx7lCEA,4774
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=VMwT-UDKKjZQtvEj7xEUUKAuGvzXr3ak05fPy64nnsw,4307
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/config_serialization.py,sha256=Vj-59ImTj0YEGI7MgRgwqJWIeGcIlrdLzPDiedwHV_E,4062
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=
- mct_nightly-1.8.0.
- mct_nightly-1.8.0.
- mct_nightly-1.8.0.
- mct_nightly-1.8.0.
- mct_nightly-1.8.0.
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=V16tvEpBz5-Pfl0h8dkPs4F32DWXyYdEn6HnQHYKCfs,3161
+ mct_nightly-1.8.0.4042023.post409.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ mct_nightly-1.8.0.4042023.post409.dist-info/METADATA,sha256=ykeWnMeaN2geVVupGfQh8pf-KlYPq3p6YvSiQpxdDiI,10971
+ mct_nightly-1.8.0.4042023.post409.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ mct_nightly-1.8.0.4042023.post409.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-1.8.0.4042023.post409.dist-info/RECORD,,
model_compression_toolkit/__init__.py

@@ -14,8 +14,6 @@
  # ==============================================================================

  from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfigV2
- from model_compression_toolkit.gptq.common.gptq_quantizer_config import GPTQQuantizerConfig, SoftQuantizerConfig
  from model_compression_toolkit.core.common.quantization import quantization_config
  from model_compression_toolkit.core.common.mixed_precision import mixed_precision_quantization_config
  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, \
@@ -36,25 +34,21 @@ from model_compression_toolkit.core.common import network_editors as network_edi
  from model_compression_toolkit.core.keras.quantization_facade import keras_post_training_quantization, \
      keras_post_training_quantization_mixed_precision
  from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization_experimental
- from model_compression_toolkit.
-
- from model_compression_toolkit.
- from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, \
-     keras_quantization_aware_training_finalize
- from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, \
-     pytorch_quantization_aware_training_finalize
- from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, \
-     pytorch_post_training_quantization_mixed_precision
+ from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, keras_quantization_aware_training_finalize
+ from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, pytorch_quantization_aware_training_finalize
+ from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
  from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization_experimental
- from model_compression_toolkit.gptq.pytorch.quantization_facade import \
-     pytorch_gradient_post_training_quantization_experimental
- from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config

  from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data, keras_kpi_data_experimental
  from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data, pytorch_kpi_data_experimental

  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model

-
+
+ from model_compression_toolkit import exporter
+
+ from model_compression_toolkit import gptq
+ from model_compression_toolkit.gptq import GradientPTQConfig
+

  __version__ = "1.8.0"
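In practice, this `__init__.py` reshuffle means the GPTQ and export entry points now live under the `gptq` and `exporter` subpackages instead of being re-exported from the package root. A minimal sketch of the resulting import layout (only names that appear in the diff above are used; this is an illustration, not a full quantization workflow):

```python
# Illustrative only: shows where the public names moved in this nightly build.
import model_compression_toolkit as mct

# GradientPTQConfig is now re-exported from the gptq subpackage rather than the root.
from model_compression_toolkit.gptq import GradientPTQConfig

# Because the root __init__ imports the subpackages, they are also reachable as attributes.
print(mct.exporter.__name__)  # model_compression_toolkit.exporter
print(mct.gptq.__name__)      # model_compression_toolkit.gptq
print(GradientPTQConfig.__name__)
```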
model_compression_toolkit/core/common/logger.py

@@ -17,7 +17,6 @@
  import logging
  import os
  from datetime import datetime
- from os import path
  from pathlib import Path

  LOGGER_NAME = 'Constrained Model Optimization'
@@ -43,7 +42,7 @@ class Logger:

          """

-         if not path.exists(log_path):
+         if not os.path.exists(log_path):
              Path(log_path).mkdir(parents=True, exist_ok=True)

      @staticmethod
@@ -93,6 +92,15 @@ class Logger:

          print(f'log file is in {log_name}')

+     @staticmethod
+     def shutdown():
+         """
+         An orderly command to shutdown by flushing and closing all logging handlers.
+
+         """
+         Logger.LOG_PATH = None
+         logging.shutdown()
+
      ########################################
      # Delegating methods to wrapped logger
      ########################################
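The new `Logger.shutdown()` is a thin wrapper: it forgets the stored log directory and defers to the standard library, which flushes and closes every registered handler. A standalone sketch of the same pattern with plain `logging` (the class below is a stand-in for illustration, not MCT's own Logger):

```python
import logging

class DemoLogger:
    LOG_PATH = None  # mirrors the class-level attribute that MCT's shutdown() resets

    @staticmethod
    def shutdown():
        # Flush and close all handlers registered with the logging module,
        # then drop the remembered log location.
        DemoLogger.LOG_PATH = None
        logging.shutdown()

logging.basicConfig(filename="demo.log", level=logging.INFO)
logging.getLogger("demo").info("last message before shutdown")
DemoLogger.shutdown()  # safe to call once at process exit, even if no file handler was added
```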
model_compression_toolkit/core/keras/back2framework/model_gradients.py

@@ -171,8 +171,9 @@ def keras_iterative_approx_jacobian_trace(graph_float: common.Graph,

  # If the change to the mean Jacobian approximation is insignificant we stop the calculation
  if j > MIN_JACOBIANS_ITER:
-
-
+     new_mean = np.mean([jac_trace_approx, *trace_jv])
+     delta = new_mean - np.mean(trace_jv)
+     if np.abs(delta) / (np.abs(new_mean) + 1e-6) < JACOBIANS_COMP_TOLERANCE:
          trace_jv.append(jac_trace_approx)
          break
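Both backends now share the same early-stopping rule for the iterative Jacobian-trace approximation: stop once folding the newest sample into the running mean changes that mean by less than a relative tolerance (the PyTorch hunk further down applies the identical test with `torch.stack`/`torch.mean`). A NumPy sketch of the criterion, with illustrative constant values rather than MCT's own:

```python
import numpy as np

MIN_JACOBIANS_ITER = 10          # illustrative; MCT defines its own constants
JACOBIANS_COMP_TOLERANCE = 1e-3  # illustrative relative tolerance

def should_stop(trace_samples, new_sample, iteration):
    """Stop once the newest trace sample barely moves the running mean."""
    if iteration <= MIN_JACOBIANS_ITER or not trace_samples:
        return False
    new_mean = np.mean([new_sample, *trace_samples])
    delta = new_mean - np.mean(trace_samples)
    return np.abs(delta) / (np.abs(new_mean) + 1e-6) < JACOBIANS_COMP_TOLERANCE

# Toy usage: samples settle around 5.0, so the loop stops well before 200 iterations.
samples, rng = [], np.random.default_rng(0)
for j in range(200):
    s = 5.0 + 0.01 * rng.standard_normal()
    samples.append(s)
    if should_stop(samples[:-1], s, j):
        break
print(len(samples), np.mean(samples))
```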
model_compression_toolkit/core/keras/quantization_facade.py

@@ -19,7 +19,7 @@ from model_compression_toolkit.core import common
  from model_compression_toolkit.core.common import Logger
  from model_compression_toolkit.core.common.constants import TENSORFLOW
  from model_compression_toolkit.core.common.user_info import UserInformation
- from model_compression_toolkit.gptq
+ from model_compression_toolkit.gptq import GradientPTQConfig, GradientPTQConfigV2
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.core.common.network_editors.actions import EditRule
model_compression_toolkit/core/pytorch/back2framework/model_gradients.py

@@ -26,8 +26,9 @@ from model_compression_toolkit.core.common.constants import EPS, MIN_JACOBIANS_I
  from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
  from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
  from model_compression_toolkit.core.pytorch.back2framework.instance_builder import node_builder
- from model_compression_toolkit.core.pytorch.
- from model_compression_toolkit.core.pytorch.
+ from model_compression_toolkit.core.pytorch.constants import BUFFER
+ from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder, BufferHolder
+ from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy, get_working_device
  from model_compression_toolkit.core.common.logger import Logger

@@ -133,7 +134,13 @@ class PytorchModelGradients(torch.nn.Module):

      for n in self.node_sort:
          if not isinstance(n, FunctionalNode):
-
+             if n.type == BufferHolder:
+                 self.add_module(n.name, node_builder(n))
+                 self.get_submodule(n.name). \
+                     register_buffer(n.name,
+                                     torch.Tensor(n.get_weights_by_keys(BUFFER)).to(get_working_device()))
+             else:
+                 self.add_module(n.name, node_builder(n))

  def forward(self,
              *args: Any) -> Any:
@@ -289,9 +296,9 @@ def pytorch_iterative_approx_jacobian_trace(graph_float: common.Graph,

  # If the change to the mean Jacobian approximation is insignificant we stop the calculation
  if j > MIN_JACOBIANS_ITER:
-
-
-     if torch.abs(delta) / (torch.abs(
+     new_mean = torch.mean(torch.stack([jac_trace_approx, *trace_jv]))
+     delta = new_mean - torch.mean(torch.stack(trace_jv))
+     if torch.abs(delta) / (torch.abs(new_mean) + 1e-6) < JACOBIANS_COMP_TOLERANCE:
          trace_jv.append(jac_trace_approx)
          break
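The new BufferHolder branch relies on the standard PyTorch mechanism for constants: the node's stored tensor is attached to the freshly added submodule with `register_buffer`, so it is carried in `state_dict` and follows the module across devices without becoming a trainable parameter. A toy illustration of that mechanism in plain PyTorch (nothing MCT-specific; the class and names below are hypothetical):

```python
import torch
import torch.nn as nn

class BufferHolder(nn.Module):
    """Toy stand-in for a graph node that only carries a constant tensor."""
    pass

parent = nn.Module()
name = "rotation_table"  # hypothetical node name

# Build the submodule, then attach the constant as a buffer rather than a Parameter:
# it is saved with the model and moved by .to(device), but receives no gradients.
parent.add_module(name, BufferHolder())
parent.get_submodule(name).register_buffer(name, torch.arange(4, dtype=torch.float32))

print(dict(parent.named_buffers()))
# {'rotation_table.rotation_table': tensor([0., 1., 2., 3.])}
```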
model_compression_toolkit/core/pytorch/constants.py

@@ -92,3 +92,7 @@ IN_PROJ_WEIGHT = 'in_proj_weight'
  IN_PROJ_BIAS = 'in_proj_bias'
  BIAS_K = 'bias_k'
  BIAS_V = 'bias_v'
+
+ # Batch size value for 'reshape' and 'view' operators,
+ # the value is -1 so the batch size is inferred from the length of the array and remaining dimensions.
+ BATCH_DIM_VALUE = -1
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py

@@ -14,10 +14,13 @@
  # ==============================================================================
  from torch import reshape
  import torch
+
+ from model_compression_toolkit.core.common import Logger
  from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
  from model_compression_toolkit.core import common
  from model_compression_toolkit.core.common.graph.base_graph import Graph
  from model_compression_toolkit.core.common.graph.base_node import BaseNode
+ from model_compression_toolkit.core.pytorch.constants import BATCH_DIM_VALUE

  class ReshapeWithStaticShapes(common.BaseSubstitution):
@@ -47,14 +50,25 @@ class ReshapeWithStaticShapes(common.BaseSubstitution):
      Returns:
          Graph after applying the substitution.
      """
+     # we want the batch size value to infer from the length of the array and remaining dimensions
+     if len(node.output_shape) == 1:
+         node.output_shape[0][0] = BATCH_DIM_VALUE
+     else:
+         Logger.error('Reshape or view nodes should have a single output shape')  # pragma: no cover
+
      # configure the new static output shape attribute
      node.op_call_args = node.output_shape

      # modify the node input info
      node.input_shape = [node.input_shape[0]]
+
+     # the first input is the tensor to be reshaped, we want his batch size value to infer
+     # from the length of the array and remaining dimensions
+     node.input_shape[0][0] = BATCH_DIM_VALUE
+
      nodes_to_check = []
      for in_edge in graph.incoming_edges(node):
-         if in_edge.sink_index > 0:
+         if in_edge.sink_index > 0:  # the first input is the tensor to be reshaped
              nodes_to_check.append(in_edge.source_node)
          graph.remove_edge(in_edge.source_node, node)
      for n in nodes_to_check:
@@ -80,4 +94,4 @@ def clean_graph_from_nodes_without_out_edges(graph: Graph,
      graph.remove_edge(in_edge.source_node, node)
      graph.remove_node(node)
      for n in nodes_to_check:
-         clean_graph_from_nodes_without_out_edges(graph, n)
+         clean_graph_from_nodes_without_out_edges(graph, n)
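Forcing the leading dimension to BATCH_DIM_VALUE (-1) matters because this substitution freezes the traced output shape into `op_call_args` as a static argument; with -1 in the batch slot, torch infers that dimension at run time, so the rewritten reshape/view keeps working for batch sizes other than the one seen during tracing. A small standalone demonstration of that behavior:

```python
import torch

BATCH_DIM_VALUE = -1                 # same convention as the new constant
static_shape = [BATCH_DIM_VALUE, 8]  # e.g. a traced shape [2, 8] with the batch dim replaced by -1

for batch in (2, 5, 16):
    x = torch.randn(batch, 2, 4)
    y = torch.reshape(x, tuple(static_shape))  # -1 lets torch infer the batch dimension
    assert y.shape == (batch, 8)
print("static shape", static_shape, "works for any batch size")
```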
model_compression_toolkit/exporter/__init__.py

@@ -12,3 +12,8 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
+
+ from model_compression_toolkit.exporter.model_exporter.keras.keras_export_facade import keras_export_model, KerasExportMode
+ from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import PyTorchExportMode, pytorch_export_model
+ from model_compression_toolkit.exporter.model_exporter.tflite.tflite_export_facade import tflite_export_model, TFLiteExportMode
+
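With these imports pulled up into exporter/__init__.py, and the package root importing exporter (see the __init__.py diff above), the export facades and their mode enums become reachable from the top-level namespace. The sketch below only touches the names added in this hunk; the facades' call signatures are not part of this diff, so no export is actually invoked:

```python
import model_compression_toolkit as mct

# Facade functions and mode enums re-exported by mct.exporter in this build.
facades = [
    mct.exporter.keras_export_model,
    mct.exporter.pytorch_export_model,
    mct.exporter.tflite_export_model,
]
modes = [
    mct.exporter.KerasExportMode,
    mct.exporter.PyTorchExportMode,
    mct.exporter.TFLiteExportMode,
]
for fn in facades:
    print(f"{fn.__module__}.{fn.__name__}")
for mode in modes:
    print(mode)
```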
model_compression_toolkit/exporter/model_exporter/__init__.py

@@ -13,6 +13,3 @@
  # limitations under the License.
  # ==============================================================================

- from model_compression_toolkit.exporter.model_exporter.keras.keras_export_facade import keras_export_model, KerasExportMode
- from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import PyTorchExportMode, pytorch_export_model
- from model_compression_toolkit.exporter.model_exporter.tflite.tflite_export_facade import tflite_export_model, TFLiteExportMode
model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py

@@ -19,7 +19,7 @@ from typing import Callable
  import keras.models
  import tensorflow as tf

- from model_compression_toolkit import keras_load_quantized_model
+ from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model
  from model_compression_toolkit.core.common import Logger
  from model_compression_toolkit.exporter.model_exporter.keras.fakely_quant_keras_exporter import FakelyQuantKerasExporter
model_compression_toolkit/exporter/model_wrapper/__init__.py

@@ -13,12 +13,8 @@
  # limitations under the License.
  # ==============================================================================

- from model_compression_toolkit.
+ from model_compression_toolkit.exporter.model_wrapper.keras.validate_layer import is_keras_layer_exportable
+ from model_compression_toolkit.exporter.model_wrapper.keras.builder.fully_quantized_model_builder import get_exportable_keras_model

-
-
- from model_compression_toolkit.exporter.model_wrapper.keras.builder.fully_quantized_model_builder import get_exportable_keras_model
-
- if FOUND_TORCH:
-     from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
-     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
+ from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
+ from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model