mct-nightly 1.8.0.27022023.post430__py3-none-any.whl → 1.8.0.27032023.post403__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. {mct_nightly-1.8.0.27022023.post430.dist-info → mct_nightly-1.8.0.27032023.post403.dist-info}/METADATA +7 -7
  2. {mct_nightly-1.8.0.27022023.post430.dist-info → mct_nightly-1.8.0.27032023.post403.dist-info}/RECORD +65 -59
  3. {mct_nightly-1.8.0.27022023.post430.dist-info → mct_nightly-1.8.0.27032023.post403.dist-info}/WHEEL +1 -1
  4. model_compression_toolkit/__init__.py +9 -15
  5. model_compression_toolkit/core/common/logger.py +10 -2
  6. model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +6 -1
  7. model_compression_toolkit/core/keras/quantization_facade.py +1 -1
  8. model_compression_toolkit/core/pytorch/constants.py +4 -0
  9. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +4 -10
  10. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +16 -2
  11. model_compression_toolkit/exporter/__init__.py +5 -0
  12. model_compression_toolkit/exporter/model_exporter/__init__.py +0 -3
  13. model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py +1 -1
  14. model_compression_toolkit/exporter/model_wrapper/__init__.py +4 -8
  15. model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +45 -39
  16. model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +39 -24
  17. model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +50 -42
  18. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +43 -36
  19. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +24 -5
  20. model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +25 -18
  21. model_compression_toolkit/gptq/__init__.py +6 -0
  22. model_compression_toolkit/gptq/common/gptq_config.py +60 -106
  23. model_compression_toolkit/gptq/common/gptq_constants.py +0 -7
  24. model_compression_toolkit/gptq/common/gptq_training.py +28 -38
  25. model_compression_toolkit/gptq/keras/gptq_training.py +10 -28
  26. model_compression_toolkit/gptq/keras/graph_info.py +8 -33
  27. model_compression_toolkit/gptq/keras/quantization_facade.py +6 -12
  28. model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -1
  29. model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +2 -2
  30. model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +45 -0
  31. model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +112 -0
  32. model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +22 -128
  33. model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +11 -41
  34. model_compression_toolkit/gptq/pytorch/gptq_training.py +12 -4
  35. model_compression_toolkit/gptq/pytorch/graph_info.py +9 -6
  36. model_compression_toolkit/gptq/pytorch/quantization_facade.py +9 -22
  37. model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +3 -1
  38. model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -20
  39. model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +10 -1
  40. model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +2 -2
  41. model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +45 -0
  42. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +14 -0
  43. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +115 -0
  44. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +236 -0
  45. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +196 -0
  46. model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +9 -31
  47. model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +30 -37
  48. model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +27 -36
  49. model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +21 -21
  50. model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +25 -26
  51. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py +1 -2
  52. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +1 -1
  53. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py +12 -0
  54. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py +4 -0
  55. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py +1 -0
  56. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +12 -0
  57. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py +6 -0
  58. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py +3 -0
  59. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +53 -2
  60. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +2 -1
  61. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +22 -4
  62. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +24 -3
  63. model_compression_toolkit/gptq/common/gptq_quantizer_config.py +0 -93
  64. {mct_nightly-1.8.0.27022023.post430.dist-info → mct_nightly-1.8.0.27032023.post403.dist-info}/LICENSE.md +0 -0
  65. {mct_nightly-1.8.0.27022023.post430.dist-info → mct_nightly-1.8.0.27032023.post403.dist-info}/top_level.txt +0 -0
  66. /model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/{common → pytorch/quantizers/activation_inferable_quantizers}/activation_lut_pot_inferable_quantizer.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mct-nightly
3
- Version: 1.8.0.27022023.post430
3
+ Version: 1.8.0.27032023.post403
4
4
  Summary: A Model Compression Toolkit for neural networks
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -49,8 +49,8 @@ MCT is developed by researchers and engineers working at Sony Semiconductor Isra
49
49
  ## Supported Features
50
50
 
51
51
  MCT supports different quantization methods:
52
- * Post training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
53
- * Gradient-based post training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
52
+ * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
53
+ * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
54
54
  * Quantization aware training (QAT)[*](#experimental-features)
55
55
 
56
56
 
@@ -107,15 +107,15 @@ A nightly package is also available (unstable):
107
107
  pip install mct-nightly
108
108
  ```
109
109
 
110
- ### Requierments
110
+ ### Requirements
111
111
 
112
- To run MCT, one of the supported frameworks, Tenosflow/Pytorch, needs to be installed.
112
+ To run MCT, one of the supported frameworks, Tensorflow/Pytorch, needs to be installed.
113
113
 
114
- For using with Tensorflow please install the packages:
114
+ For use with Tensorflow please install the packages:
115
115
  [tensorflow](https://www.tensorflow.org/install),
116
116
  [tensorflow-model-optimization](https://www.tensorflow.org/model_optimization/guide/install)
117
117
 
118
- For using with PyTorch please install the packages:
118
+ For use with PyTorch please install the packages:
119
119
  [torch](https://pytorch.org/)
120
120
 
121
121
  Also, a [requirements](requirements.txt) file can be used to set up your environment.
@@ -1,4 +1,4 @@
1
- model_compression_toolkit/__init__.py,sha256=h7ysLzLClGy_hSX7fQRe3igfjaejc9k9-pMJ3vu_WcE,4254
1
+ model_compression_toolkit/__init__.py,sha256=oB0z1Vm7LRYpyc2-nZxeTLSddf3OSbIpXXTbL-bzVuk,3534
2
2
  model_compression_toolkit/core/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
3
3
  model_compression_toolkit/core/analyzer.py,sha256=etuO_VPRkHRlCKvfpQYGe56j8psu4QuXTumneLunj9g,2943
4
4
  model_compression_toolkit/core/exporter.py,sha256=U_-ea-zYHsnIt2ydameMLZ_gzDaCMI1dRa5IjA8RUuc,4233
@@ -11,7 +11,7 @@ model_compression_toolkit/core/common/defaultdict.py,sha256=n-F3dP-VTMnGy9KfCwp7
11
11
  model_compression_toolkit/core/common/framework_implementation.py,sha256=cW2P4G1Dq4SiQtATWWq3QjfIB9EbRFiWM43aV1ftZjw,22674
12
12
  model_compression_toolkit/core/common/framework_info.py,sha256=2mrdVpyTvDgrae8Wf_iAGI81vPlMCp9EwIbNw-Ywbfs,6430
13
13
  model_compression_toolkit/core/common/immutable.py,sha256=rSPd3Xpx4Rzt6-EwDA9tXvKygrrt4xvmq01JVCXY0hQ,1723
14
- model_compression_toolkit/core/common/logger.py,sha256=Pm5leGZT6CbcBadNvO9W2R2VTjUzVbr99KnAiSQwqgs,4673
14
+ model_compression_toolkit/core/common/logger.py,sha256=b9DVktZ-LymFcRxv2aL_sdiE6S2sSrFGWltx6dgEuUY,4863
15
15
  model_compression_toolkit/core/common/memory_computation.py,sha256=fYMU4fiCLlQRN4UIkoabuOvdwNHw4-lS1eNTuCVsBbQ,1217
16
16
  model_compression_toolkit/core/common/model_builder_mode.py,sha256=jll9-59OPaE3ug7Y9-lLyV99_FoNHxkGZMgcm0Vkpss,1324
17
17
  model_compression_toolkit/core/common/model_collector.py,sha256=9k1814EvteXo4WLaERIjU4KBRG_7WTjcQpAMPiCjAr4,5012
@@ -67,7 +67,7 @@ model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py,sha2
67
67
  model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py,sha256=cjLf_g4n1INlT1TE1z-I41hDXUTTy8krUSvhRB57rv0,1602
68
68
  model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py,sha256=jYrvXvHhfpmFEgafBHwOmeG06_jcHdLeyHbzENvefuQ,19342
69
69
  model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
70
- model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py,sha256=cG2q4M0lOjEAEXrmTynHZLA38C1YmExiQeiCzItF5x8,15272
70
+ model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py,sha256=R_JgukiCV52S9r5i58JSNQsKSDDAmy90vsGema2Is_o,15458
71
71
  model_compression_toolkit/core/common/network_editors/__init__.py,sha256=vZmu55bYqiaOQs3AjfwWDXHmuKZcLHt-wm7uR5fPEqg,1307
72
72
  model_compression_toolkit/core/common/network_editors/actions.py,sha256=xHtZwtMTLvE1n4Lw_kMql3I-Bu8kINXQRj3ZRk0ppIg,17948
73
73
  model_compression_toolkit/core/common/network_editors/edit_network.py,sha256=kT5mxThJRpuv1MXR53jOKAjiO6cFKI3aujYqwI9EKHo,1811
@@ -145,7 +145,7 @@ model_compression_toolkit/core/keras/keras_implementation.py,sha256=YJ5EAUbbcryj
145
145
  model_compression_toolkit/core/keras/keras_model_validation.py,sha256=IzlEriDsmTjZeWYIluIOeXNVlhTmaO-UGuFE7PBRG1o,1717
146
146
  model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=WGuyYfm3C2FScrI5WO-xPVeK_A9hJ-PyM4rb-CotsIs,3936
147
147
  model_compression_toolkit/core/keras/kpi_data_facade.py,sha256=xLYtLs-j0WiRtUFF_U7P3eH8xLR3606EabwtdklaGeg,8881
148
- model_compression_toolkit/core/keras/quantization_facade.py,sha256=D4ey6IAvDrrNs-90mOajJBzplzkDPqmNckOZ22Ukph4,18051
148
+ model_compression_toolkit/core/keras/quantization_facade.py,sha256=dluWNMmbUICeASvsSbTfPzmkqgVfuHzhwFyLIT_gLRQ,18032
149
149
  model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=BauH-Ssoiuv5wu81fk1bm9HO_0yR0oTwKyLkXgE3qSE,2022
150
150
  model_compression_toolkit/core/keras/back2framework/__init__.py,sha256=rhIiXg_nBgUZ-baE3M6SzCuQbcnq4iebY1jtJBvKHOM,808
151
151
  model_compression_toolkit/core/keras/back2framework/factory_model_builder.py,sha256=MtfhDEou_7OWwFBgJIICxSUB0Flb18P1yM6nST5S9Xs,2231
@@ -199,7 +199,7 @@ model_compression_toolkit/core/keras/statistics_correction/__init__.py,sha256=9H
199
199
  model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py,sha256=ZUUUxCkGUKRsdud0kHeosEhDg5MejDOa1NirRHbQYes,3055
200
200
  model_compression_toolkit/core/keras/visualization/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
201
201
  model_compression_toolkit/core/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
202
- model_compression_toolkit/core/pytorch/constants.py,sha256=joqCvu_kmF1SrZNgxejNy3Dv1G1aeJalVywrjAy5PD0,2442
202
+ model_compression_toolkit/core/pytorch/constants.py,sha256=XC9uCV7zbkP47m0YoLla6VJCL_DBTmMqGe7iXLa-qes,2626
203
203
  model_compression_toolkit/core/pytorch/default_framework_info.py,sha256=lBNRSrY8LDJA2Oxk4qiVolUdeNIiDyUK1ek7-2ykc7Y,4219
204
204
  model_compression_toolkit/core/pytorch/kpi_data_facade.py,sha256=NtL-bRGF_0UWDUBlZt8mgdmvARYRwHSyWp3t3Y8Kq-U,8782
205
205
  model_compression_toolkit/core/pytorch/pytorch_implementation.py,sha256=3ENp_Hr5KN5OiLBqMzsag4AlAfZDywEDVOKCrKNkH-I,25965
@@ -224,10 +224,10 @@ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchno
224
224
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py,sha256=JDWOaNwYrZG0zTwd3HwoZUM3tKu7zPbzLOrqNQsu8xA,2162
225
225
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py,sha256=4mnowFmfDQjKlhHqsNto1iL4WbHyh4cM3Lf67Z-Cnzc,4804
226
226
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py,sha256=s_rri67qddkUxsVyQqj5jPqQydHL9N3efMvhmiqV4-g,5809
227
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py,sha256=Npz9RNwjEbNKHGbwBjDYqgSdl9FqnRHyQhVnAq3Aq-s,38525
227
+ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py,sha256=b6T6rA_6RZtaCtmcXn7tC04Bs7XvzkSE68Xq-YNZOP8,38365
228
228
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py,sha256=EMCviyFyJFLEKuAUz3rZHLfB9MAU1kywSBL2XQNzLlg,1953
229
229
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py,sha256=Gxy_WuFyRhtmw6bSeuhoGwriiu7vcL4bvOTAMWT5SNs,5563
230
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py,sha256=iMW_85CB-f6JAymCL3vhSEEyCCiq6sa0335Y0pCIn6c,3492
230
+ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py,sha256=u0bcymbcrO7XVRqcBwUqqNDq7PIjlndLp2OS8v0jxNo,4153
231
231
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py,sha256=GCMXOWnXFQhgGoBUkdiZu08x33E4iJYq8mLlWwxt4vw,2911
232
232
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py,sha256=XFtU9yuBmoZlX0f0mS6otMPWMk-RcWs94XdvvTNhW8Y,3303
233
233
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py,sha256=EgBe7ij5LDD2i6yt320aeMl4AoJIAyOeKYg4MOsq7es,9833
@@ -300,8 +300,8 @@ model_compression_toolkit/core/tpc_models/tflite_tpc/v1/__init__.py,sha256=t4JKs
300
300
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tp_model.py,sha256=3u8pxc6lgbOb_nT6BSSfoOML5LFdE0abXS0s83AZAHI,7770
301
301
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_keras.py,sha256=kEpZOfjUA5-qzdhuw4DSMNlMJdYjN05rWMQfo5rtM3I,6066
302
302
  model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_pytorch.py,sha256=TN51UfSK6lOQ_JRoXNzzglGpetrcYltLeg98Tz9mpC8,4937
303
- model_compression_toolkit/exporter/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
304
- model_compression_toolkit/exporter/model_exporter/__init__.py,sha256=zRTlsEE8qlPaSFbnvGWVrXtJw_r3eYCBvSEdLk4ySkA,1082
303
+ model_compression_toolkit/exporter/__init__.py,sha256=Ic52ZgHIPuAWsgWX7LuzA9TkRPo_flpQizJAc39ttrc,1083
304
+ model_compression_toolkit/exporter/model_exporter/__init__.py,sha256=9HIBmj8ROdCA-yvkpA8EcN6RHJe_2vEpLLW_gxOJtak,698
305
305
  model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
306
306
  model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py,sha256=iwZZEs_1AwLvClepYG38P_oTrQrA2YXxFTQUNMVoyS4,2022
307
307
  model_compression_toolkit/exporter/model_exporter/keras/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
@@ -314,54 +314,60 @@ model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pyto
314
314
  model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=T3K43YiDGa2g14SbZeDRqhr-3kFYVOyR6EZoSlIlLyc,2892
315
315
  model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=iB6o2arjkQIiFcXl8oVoTowmEu0WXBbPjrcZQLjVWbQ,3925
316
316
  model_compression_toolkit/exporter/model_exporter/tflite/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
317
- model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py,sha256=-44sgg-NM89xy8ARjG9uvG-6-uZUQzLSR8A6XaJdT0g,3006
317
+ model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py,sha256=59d__yulPJhD2_dIT2EiFWlw5qMfCLq0NwZarXOOHFU,3074
318
318
  model_compression_toolkit/exporter/model_exporter/tflite/int8_tflite_exporter.py,sha256=K_eHtLZ9jTUvEiLDL4uX-X_Q6HLwaIB--uUdNMvJFsE,8169
319
319
  model_compression_toolkit/exporter/model_exporter/tflite/tflite_export_facade.py,sha256=MFBd6DwO92Ub58uEXKcw6VQbtr40StokXjpRUA4XV-U,3338
320
- model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=GJg_o-jj48QDlDDX0eEVwWuD33iEx-CaBU4JFbcEYsQ,1314
320
+ model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
321
321
  model_compression_toolkit/exporter/model_wrapper/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
322
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=acSCl7AuPsg0FY4Y2dqOi8x5BEpfYpAEHDEqlaatELY,3335
322
+ model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=QVSVD3ayI431id5e80BgBDELpUW5rYsHagR6tC2BcGo,3882
323
323
  model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
324
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=21ugFfg8kwO4Y9nbphJDqvh-pJyqUz0AG3emdGf38co,2494
325
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=yPHhu_hVermx33ShHy7iZnFY_NFiAyZRL1u8ig3BDYw,6853
324
+ model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=qhs7renKRKc5S-ad2ctiF-GeIhkpFIlDsL3Co0i9ViI,2988
325
+ model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=84jpx_10f3DYFU4svDOHReykrYVj-MXcthxIQW4mwTk,8596
326
326
  model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizers.py,sha256=n7VTA-a9TrLFpfdYAqrAKj6PGlAyLq8-xdwnMMpX71k,2077
327
327
  model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
328
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=qydJE_218J_8k2bzERbE4l305_MhEykrAZbEUtj6lKI,1571
328
+ model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=-DbO4K3F7awXzCiIKi6vVWgiRqPBe3CaPiVGGfn4iQ4,2036
329
329
  model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
330
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=ijnRigugvu7APEsNxS0CMX_BAxv9Vn9m9M0sEecG9D4,2041
331
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=DLD4ao29pBErnnp-ZT0x9JlUxb_X3IQ_B0cqvrhd6MA,6239
330
+ model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=XrAqmyUuC1EtoyR010Quy9qre-60RsvnTbqK7dhwU38,2496
331
+ model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=BlbojVGO4QG0_f7kjdGkGGESQzsXX0AgmJAf6KHHbrU,7674
332
332
  model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizers.py,sha256=hinP-wtyxZyoW860GdJAk6M3iPjmwwPXQTUxd56yhq8,2086
333
- model_compression_toolkit/gptq/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
333
+ model_compression_toolkit/gptq/__init__.py,sha256=tPxlcYl8JwK-EWVTy5IVgGOaUJsnG-6PnOKeYNeGJjQ,1250
334
334
  model_compression_toolkit/gptq/runner.py,sha256=rB11-U68ZcR_IosgMYNegwyO5mGRjMMgL9OdHEIZOGU,5484
335
335
  model_compression_toolkit/gptq/common/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
336
- model_compression_toolkit/gptq/common/gptq_config.py,sha256=vEnJ379dUMzV2Ibp_9k2b0LLe11YHCPmX4QjvbYxoqw,11798
337
- model_compression_toolkit/gptq/common/gptq_constants.py,sha256=dTSq70A7ySQaQr2OkDNNe4VidXPwS18CVrYkTEGDfh4,812
336
+ model_compression_toolkit/gptq/common/gptq_config.py,sha256=HLiDHAJUCrp_3PxG4qwLKriYLOQtJSKvX1YdGqjweKo,9690
337
+ model_compression_toolkit/gptq/common/gptq_constants.py,sha256=QSm6laLkIV0LYmU0BLtmKp3Fi3SqDfbncFQWOGA1cGU,611
338
338
  model_compression_toolkit/gptq/common/gptq_graph.py,sha256=tXWLUtP52OLgC3WO9M9kaG2OYVDE9imY9L4ef16lAqY,2826
339
- model_compression_toolkit/gptq/common/gptq_quantizer_config.py,sha256=ACCgISnZ9ir5Qa3t7TdSWTxwbm27o_gt0K-HMLUQqkY,3389
340
- model_compression_toolkit/gptq/common/gptq_training.py,sha256=DCEVjHXoLPEVzmnGDvzQvPaTSDfog1O1Hq_0Ziuusc4,15250
339
+ model_compression_toolkit/gptq/common/gptq_training.py,sha256=5gTLlvP5NBT-zY7QxW5LmarMFUaHCTL01KZdbvl_-lA,15106
341
340
  model_compression_toolkit/gptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
342
341
  model_compression_toolkit/gptq/keras/gptq_loss.py,sha256=rbRkF15MYd6nq4G49kcjb_dPTa-XNq9cTkrb93mXawo,6241
343
- model_compression_toolkit/gptq/keras/gptq_training.py,sha256=r61e6-fkfCOqZ5q8GeR20xiHjqzFg_x5VfZNjcGz1DM,16554
344
- model_compression_toolkit/gptq/keras/graph_info.py,sha256=FyHIMTMZCP0hqrSTvs63_q6QP7bLGZfRdP7OpNwvXCs,5174
345
- model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=DSagaoLEljHZKAeKgmomuYifhjjDe5VJTVGAPtvffR0,14341
342
+ model_compression_toolkit/gptq/keras/gptq_training.py,sha256=d0M2RkiRvKROZlc3GCr4_SHRSWXNc3RU081K1GF4a0M,15891
343
+ model_compression_toolkit/gptq/keras/graph_info.py,sha256=nYKL3hrd6L3EiyxejrE1xJUeppxSmflHO4nt2fkE-aY,4399
344
+ model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=tNL3_0NdsFUqtCQ9_Qjy4_hYfikXmZZGXdytl-lz8tQ,14112
346
345
  model_compression_toolkit/gptq/keras/quantizer/__init__.py,sha256=H76G9W-tYSpHBnqRRgIUoWOjhjKN7XN00njHgjBT_JA,872
347
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=Ch1Mk8M_U9L5tBArFFeg_7g_3iTSr7n65LfQOSAZQ1E,4827
346
+ model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=bEIgXk17VzyxDGHBqHVg_Ox9PV8I7zd_39Qkt90XzbQ,4782
348
347
  model_compression_toolkit/gptq/keras/quantizer/quant_utils.py,sha256=XT1idm84wl4GDGoqGI8L5XH-H9OjIlhTjGCjY-ylYQw,4604
349
- model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=8fbdLLJlMm8mf8MDjH0BV02G_1HA1gxuS-2qENVJ58U,4391
348
+ model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py,sha256=Ngqk6rAfS5tWwF2DkpVE_u69Q1Kf15aaSuZ37bwOpBs,4392
349
+ model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py,sha256=iKzHnxl2ZSEp09oatfJVoiDuu6Q_iN36mOxQzDr1cy8,2087
350
350
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
351
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=dgtw95nZHMd0VzDN1MKM7TBjxcQZMEzncAFu42C9lAY,15616
351
+ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=HqvDvrQKRYvjvjEl4p4a_r0lrMSkO3gAUM1KV4EV5Js,3976
352
+ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=_r2ijqPseQNP_NtrCK4k92No_UtnFqrzi0OR2Np9vTM,11914
352
353
  model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
353
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=XZQkZ8DDxEqt57NP_g096ijBOG--MhiR8S1snCFPA2o,9364
354
+ model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=2xjOHUFsy8A0kgmhw5AiXnofN8eSb_gzyLnrGT50Hm8,8539
354
355
  model_compression_toolkit/gptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
355
356
  model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=kDuWw-6zh17wZpYWh4Xa94rpoodf82DksgjQCnL7nBc,2719
356
- model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=y2R-iIUmfMqe5UmeehGPuK2AXcyZbXMcMZGoIm3Rb28,13245
357
- model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=MTDa5_FvUf0CjhXNkUeMsNKlb31i0IYdivcSXrlcPec,3441
358
- model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=lOEx4Gkdqk4EcpG98m20nEINwe56Kyr1EjfyY55Hfmw,13255
359
- model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=Rg1UCrVB9y6OaqtqsvNpd6uYGeXriYKZbpRXWaQD1Ko,780
360
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=VgAyLxeOh24p9tn8H-VNlfZzIM6VMxnarYfIzNWjXsE,4884
361
- model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=OxEudXw8liJc45ZnloQtu3P1UWw2C7d0S_VIx6s9UQw,3579
362
- model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=NsBKD8gvccZAvSUj98j7ISJc-IoXo9nIB4jJLav34d8,4220
357
+ model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=EaPX7MfaNlzFHogrxN14-G9zGPyt8Bpgya7O0WaUkgk,13516
358
+ model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=Sphpr5wKADgwZ-sLxNqMAcsEiP_jaFEL7q2-zcrtUx8,3791
359
+ model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=LPYUDDY393M9sk0iWiWXsIGjuGYPYiUPqiGnfCzmtrY,12468
360
+ model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=ZHNHo1yzye44m9_ht4UUZfTpK01RiVR3Tr74-vtnOGI,968
361
+ model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=IArGYcTb_c7aDnZOOlLGCuuZcV4A0DaxoYO3i-fbVNM,4291
362
+ model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=rKRb6TgNko_NhZnwUA2xzwvXAM5qj_mWNjhy_h2SwI8,3888
363
+ model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py,sha256=qprTfTkqqcAijNKsHwKsOlju75Ihu_PDEJxny_A5AD0,4221
364
+ model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py,sha256=9owTzSu_xz29dsjONB-AYXuCZoPo_4nqxTk3yH18a0g,2089
365
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
366
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py,sha256=EGs3GfC73yl7E1xlDd_sHS7Vof1Td7RAq_Ny7hAbZZA,4178
367
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=47y7GAJc-bXiKnmoCfpRa71stTVMcurnjSfBKfLR5gI,11761
368
+ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=5CmooZOZhIt9pTsgYUOSrJAoEz6YxOr0QtVXP3pZfuw,9191
363
369
  model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
364
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=vW8lRfYdxVg7Afki4wD2rFHk_MA28I6DcEWiFX9JHhw,9532
370
+ model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=T1z8KfcwwnWmq6Rh0Ncyx1JiaeTxKBQAkxVsI4yD4J4,8932
365
371
  model_compression_toolkit/ptq/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
366
372
  model_compression_toolkit/ptq/runner.py,sha256=_c1dSjlPPpsx59Vbg1buhG9bZq__OORz1VlPkwjJzoc,2552
367
373
  model_compression_toolkit/ptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
@@ -379,8 +385,8 @@ model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py,sha256
379
385
  model_compression_toolkit/qat/keras/quantizer/quant_utils.py,sha256=rS2z_ozyjzQ07MMczaAFNZ7K6RKwAnBOKyRac4UvF44,2123
380
386
  model_compression_toolkit/qat/keras/quantizer/quantization_builder.py,sha256=ESYtJGA6SGT0103Q1r33VGTu60V05pux7fK8JOnRau0,4229
381
387
  model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
382
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=SP-EoQI5H2vnsmA8_T8R5kSv9tdNAgskXps_VqpS6A8,13820
383
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=pClxv-mjowimJ5jbV3Gtsv8pJx0arQ9nrtTLciQSqu4,11089
388
+ model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=Cc1ohHnWOlWnjllAxEA2ZrLZqPiXt_oH6YfQWkEhPhI,13599
389
+ model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py,sha256=XGvg0OPQdi1WvYZJ7GA9VUBKGTWIt_VKLzdPoXfw7xQ,10939
384
390
  model_compression_toolkit/qat/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
385
391
  model_compression_toolkit/qat/pytorch/quantization_facade.py,sha256=8iH6ENPyVbPFzpJS2FaXPvjZ1VzkSgGWsb-EnHywjRA,11875
386
392
  model_compression_toolkit/qat/pytorch/quantizer/__init__.py,sha256=R4vwVcbg6QprCTNzibyF9PtbKKKBsfu9ffypKDNscJQ,859
@@ -388,25 +394,24 @@ model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py,sh
388
394
  model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py,sha256=V-oF596RdXW8W8nF8g57bEGsvB8ORvRIFoyrXBwyaWc,4086
389
395
  model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py,sha256=5XswoF-5aaFangkHssWKAQTsk6lf_zzndzfCsBWBVMs,5004
390
396
  model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
391
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=ZEcs3IVngv_4TV7Zf6XVs5nD3q3y_FtDBkfBmtQxYu4,9735
392
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=yU1sijubpt6wXPqgWEB7yr24FYy-tg51Ly4vsN0ROIc,8673
397
+ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=qTVB979OYn7yPGfD319h_E4IgoP_o52EdWw3SSiiFis,9766
398
+ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=_9gD5F-lAh4w7BkkiH4EO8VU_40_ySClXjLXf-7zPek,8810
393
399
  model_compression_toolkit/quantizers_infrastructure/__init__.py,sha256=wKRMA6H4aTPnQlvmx_gbe4d2KMZ-40QIGDJCZa-2VCU,1615
394
400
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
395
401
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
396
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/activation_lut_pot_inferable_quantizer.py,sha256=GH14y6rV3OKbfJTdv8NPee4tHwWPuIbuhwejkchMnn8,4599
397
402
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py,sha256=Zo2N4lAo1Lz91MSor0s-dng3ueFP6UpZlt9CcoCu5SM,3141
398
403
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py,sha256=tYmi5_PpsSJ-LyOcMslU0FEnG_c31VXyujc1_R7-EVk,1664
399
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py,sha256=6VyiXsp27SpERafPrWbTta0M9BQfH2MJgK44XV8Y6UY,1222
400
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py,sha256=xWd4gthIF_HXDbZvV1FWYyx02g0PGk0LU4-wOWcPSws,2968
404
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py,sha256=6KqemyFcM4a6KoCZ-6dm46iIZ_kusPnj5crH8RTAvuo,1213
405
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py,sha256=8GCjwp665nXcUZEv2RBeDT2MMZ1YvvORaxAkXooWJy0,2967
401
406
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/quant_utils.py,sha256=SDzQuh3q9ugSD80Z9IuaWOPskH5VsRRyuBOeIeWJDdQ,2153
402
407
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
403
408
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py,sha256=H0-I_-4ChTLNk7gwcnft5YV-q9Cg1wue0OUuZ1W4fek,4279
404
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py,sha256=jkQkbHvX8KfqtR_iIzDWVzKMA6sePUuzshoK-wVdQ4k,14240
409
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py,sha256=uE05T6VbLScwpyZoAXoJsCC_aYauv0LjyxSm89yZu84,14646
405
410
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizer_utils.py,sha256=Z0SoHkTl5dC0y3hrcj0bC5fSa-oU7IYuGN5sBTb4THA,3440
406
411
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py,sha256=0heI5awgysKl9-XhIWxPiCbC_IBC7mANzBP95Tu1G9o,2942
407
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py,sha256=Be0s33EOWPYCyyk1VsqU6FY6yeVPFdD5WH51CEPbODw,2112
412
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py,sha256=Xg2t9VH61BIvqYtJg6GBKkAnDmFe1u4K0i0r2OBAK-I,2742
408
413
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py,sha256=3ILK53o5RogFy1ZI2gyaJk6TjH2HV77j5uSqkSc_8W0,2228
409
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py,sha256=fxsgfxqcDHMloaPOaJU3FYjV3o_XS-yM7AeisIQpvPg,944
414
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py,sha256=ItUEs8c7LVxBPMopLD5BO2Ry9DIxFIrk_M7AdSEyBFg,979
410
415
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
411
416
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=aUiK_aOy8YImLyXT6_g3BIQ6Lt9FdPPPYjj1R93qsvc,7485
412
417
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=z-t_WU9OGURVpG3e6uBQ8Mx4JPhOxqS0Tpg_ioa0MhA,3191
@@ -419,15 +424,16 @@ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/ker
419
424
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py,sha256=pfoZCZNKsovKq3XiSim5ErSBid3ihK9WORaZh7i8wIg,4376
420
425
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=PWl58Dpoxowz2HFg7IpT74A5o3_GjFKOsmonK2Nk0Uo,8664
421
426
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
422
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py,sha256=4xRw5ySkhVPjLvKqQ5zYQPnh2XYJCx4YsBieU_iycno,10937
427
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py,sha256=spIpfw06VjoHH4AckhvMDLfOQRg3XyeRN9BJzbW-frk,11346
423
428
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizer_utils.py,sha256=2KN976TZTObiaEhoUL0-Rpceui-Nifw5LdKLdU7SRY0,5929
424
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py,sha256=g6N2eOWr7UT56X4OoR5G4h0YF1hUqM-u9_xXLdlMQxc,2166
429
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py,sha256=-hiXng1pF3wjI-YYYZqZ-NZ1TStGuec4bci3jxvYVY0,2820
425
430
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py,sha256=knIkFr8xnWFyBu8VT129eH8_Mj0Osb2jIZx32zQjgLY,4871
426
431
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py,sha256=jZ8q-vzgsUCXv32OYpZySlCC5GeA07x6NVO5q5K4TsE,1919
427
432
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py,sha256=gIKmFouy2KAJt7cbRZ1lmSitPtFNNkCgqgLJrnn9gRQ,3070
428
433
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py,sha256=G6ReERM4j9sGuRKuOPXXo-H1WMEMX7_OgQzxAZP0aaE,2501
429
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py,sha256=2K_ABBFM7BjXDLPwkhzvA3QxdsV_pTaBAowTW5wQTeo,920
434
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py,sha256=9bxo6snEJkRv5XWmhBGsV6g8LCe_1NgAE5ufIq2ewYU,1007
430
435
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
436
+ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py,sha256=GH14y6rV3OKbfJTdv8NPee4tHwWPuIbuhwejkchMnn8,4599
431
437
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py,sha256=mu17Qv9I4bzd3IAac7zFg-Goy6uQaNZlEplVRmZBArY,2928
432
438
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py,sha256=qV2F9wVUAAT0rn9STDN5gbQPbwh9EsAH8pQog-V_scg,3631
433
439
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py,sha256=qSAo-pAPWnY_-DO-84-xJYUKrc9kih0AUo3KCyre0BY,4855
@@ -439,19 +445,19 @@ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pyt
439
445
  model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py,sha256=fbXuZBlVCGjNKp3hlIp3W9NM-dtzP75c19wkvjbNCMo,5394
440
446
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
441
447
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
442
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=GMKp8NG_67lIHz1SyrteKuZK4wGMbNHQ5z8MrMVP01Y,6408
448
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=qnGMQfwttYetjeJJcdKsXJKpcb75Sy_HTS13Oorfgvo,7775
443
449
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py,sha256=uQPnn1tkD96EmTcg-zAnxKH5XzY5y9zYb6tJ9ZTm_oI,6333
444
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py,sha256=JMlDcOxuTgd9jVrtESq-dzBX24gbHs0PmYOzTkW88Sw,3789
450
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py,sha256=RRtxOjiB1gFMiqYMlUC7hqZMdJGN5FFMBj7-sD2aWJ8,3831
445
451
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/quant_utils.py,sha256=zdiew1jwR7tUKm9XWlHnAPxIZsAdKqbzzC2vH02j5wA,1505
446
452
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/trainable_quantizer_config.py,sha256=WBTYdDQtXkscjO8b7leunBVSGG8JC__DhhpINx7lCEA,4774
447
453
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
448
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=2xAPiOCb2kXb6w8DML0fH17Yk425yAVmbvC6lTs9IUE,3411
454
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py,sha256=VMwT-UDKKjZQtvEj7xEUUKAuGvzXr3ak05fPy64nnsw,4307
449
455
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/config_serialization.py,sha256=Vj-59ImTj0YEGI7MgRgwqJWIeGcIlrdLzPDiedwHV_E,4062
450
456
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
451
457
  model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
452
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=YmnoDMJUYkvkDV05aPVfr3Y73mR11CUz9YWSSAJ2mcI,2266
453
- mct_nightly-1.8.0.27022023.post430.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
454
- mct_nightly-1.8.0.27022023.post430.dist-info/METADATA,sha256=J1p_ZYOso0ZR-r24frT6XsQOrTcKdhqpUyFwXALJzSg,10975
455
- mct_nightly-1.8.0.27022023.post430.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
456
- mct_nightly-1.8.0.27022023.post430.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
457
- mct_nightly-1.8.0.27022023.post430.dist-info/RECORD,,
458
+ model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=V16tvEpBz5-Pfl0h8dkPs4F32DWXyYdEn6HnQHYKCfs,3161
459
+ mct_nightly-1.8.0.27032023.post403.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
460
+ mct_nightly-1.8.0.27032023.post403.dist-info/METADATA,sha256=vdvMp40Bki1dv21VBJ5-kg-nCldb-DsNkQenezY1bCw,10972
461
+ mct_nightly-1.8.0.27032023.post403.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
462
+ mct_nightly-1.8.0.27032023.post403.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
463
+ mct_nightly-1.8.0.27032023.post403.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: bdist_wheel (0.38.4)
2
+ Generator: bdist_wheel (0.40.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -14,8 +14,6 @@
14
14
  # ==============================================================================
15
15
 
16
16
  from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
17
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfigV2
18
- from model_compression_toolkit.gptq.common.gptq_quantizer_config import GPTQQuantizerConfig, SoftQuantizerConfig
19
17
  from model_compression_toolkit.core.common.quantization import quantization_config
20
18
  from model_compression_toolkit.core.common.mixed_precision import mixed_precision_quantization_config
21
19
  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, \
@@ -36,25 +34,21 @@ from model_compression_toolkit.core.common import network_editors as network_edi
36
34
  from model_compression_toolkit.core.keras.quantization_facade import keras_post_training_quantization, \
37
35
  keras_post_training_quantization_mixed_precision
38
36
  from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization_experimental
39
- from model_compression_toolkit.gptq.keras.quantization_facade import \
40
- keras_gradient_post_training_quantization_experimental
41
- from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
42
- from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, \
43
- keras_quantization_aware_training_finalize
44
- from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, \
45
- pytorch_quantization_aware_training_finalize
46
- from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, \
47
- pytorch_post_training_quantization_mixed_precision
37
+ from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, keras_quantization_aware_training_finalize
38
+ from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, pytorch_quantization_aware_training_finalize
39
+ from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
48
40
  from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization_experimental
49
- from model_compression_toolkit.gptq.pytorch.quantization_facade import \
50
- pytorch_gradient_post_training_quantization_experimental
51
- from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config
52
41
 
53
42
  from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data, keras_kpi_data_experimental
54
43
  from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data, pytorch_kpi_data_experimental
55
44
 
56
45
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model
57
46
 
58
- from model_compression_toolkit.exporter.model_exporter import tflite_export_model, TFLiteExportMode, keras_export_model, KerasExportMode, pytorch_export_model, PyTorchExportMode
47
+
48
+ from model_compression_toolkit import exporter
49
+
50
+ from model_compression_toolkit import gptq
51
+ from model_compression_toolkit.gptq import GradientPTQConfig
52
+
59
53
 
60
54
  __version__ = "1.8.0"
@@ -17,7 +17,6 @@
17
17
  import logging
18
18
  import os
19
19
  from datetime import datetime
20
- from os import path
21
20
  from pathlib import Path
22
21
 
23
22
  LOGGER_NAME = 'Constrained Model Optimization'
@@ -43,7 +42,7 @@ class Logger:
43
42
 
44
43
  """
45
44
 
46
- if not path.exists(log_path):
45
+ if not os.path.exists(log_path):
47
46
  Path(log_path).mkdir(parents=True, exist_ok=True)
48
47
 
49
48
  @staticmethod
@@ -93,6 +92,15 @@ class Logger:
93
92
 
94
93
  print(f'log file is in {log_name}')
95
94
 
95
+ @staticmethod
96
+ def shutdown():
97
+ """
98
+ An orderly command to shutdown by flushing and closing all logging handlers.
99
+
100
+ """
101
+ Logger.LOG_PATH = None
102
+ logging.shutdown()
103
+
96
104
  ########################################
97
105
  # Delegating methods to wrapped logger
98
106
  ########################################
@@ -22,6 +22,8 @@ from model_compression_toolkit.core.common import Logger
22
22
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI, KPITarget
23
23
  from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_manager import MixedPrecisionSearchManager
24
24
 
25
+ # Limit ILP solver runtime in seconds
26
+ SOLVER_TIME_LIMIT = 60
25
27
 
26
28
  def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
27
29
  target_kpi: KPI = None) -> List[int]:
@@ -64,7 +66,10 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
64
66
  target_kpi,
65
67
  search_manager)
66
68
 
67
- lp_problem.solve() # Try to solve the problem.
69
+ # Use default PULP solver. Limit runtime in seconds
70
+ solver = PULP_CBC_CMD(timeLimit=SOLVER_TIME_LIMIT)
71
+ lp_problem.solve(solver=solver) # Try to solve the problem.
72
+
68
73
  assert lp_problem.status == LpStatusOptimal, Logger.critical(
69
74
  "No solution was found during solving the LP problem")
70
75
  Logger.info(LpStatus[lp_problem.status])
@@ -19,7 +19,7 @@ from model_compression_toolkit.core import common
19
19
  from model_compression_toolkit.core.common import Logger
20
20
  from model_compression_toolkit.core.common.constants import TENSORFLOW
21
21
  from model_compression_toolkit.core.common.user_info import UserInformation
22
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GradientPTQConfigV2
22
+ from model_compression_toolkit.gptq import GradientPTQConfig, GradientPTQConfigV2
23
23
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
24
24
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
25
25
  from model_compression_toolkit.core.common.network_editors.actions import EditRule
@@ -92,3 +92,7 @@ IN_PROJ_WEIGHT = 'in_proj_weight'
92
92
  IN_PROJ_BIAS = 'in_proj_bias'
93
93
  BIAS_K = 'bias_k'
94
94
  BIAS_V = 'bias_v'
95
+
96
+ # # Batch size value for 'reshape' and 'view' operators,
97
+ # # the value is -1 so the batch size is inferred from the length of the array and remaining dimensions.
98
+ BATCH_DIM_VALUE = -1
@@ -58,21 +58,15 @@ class MHAParams:
58
58
 
59
59
  # Check if Add Bias KV feature is Active
60
60
  if BIAS_K and BIAS_V in mha_node.weights.keys():
61
- if mha_node.weights[BIAS_K] and mha_node.weights[BIAS_V] is not None:
61
+ if mha_node.weights[BIAS_K] is not None and mha_node.weights[BIAS_V] is not None:
62
62
  Logger.error('Add BIAS_KV feature is Not Implemented') # pragma: no cover
63
63
 
64
64
  self.embed_dim = mha_node.framework_attr[EMBED_DIM]
65
65
  self.num_heads = mha_node.framework_attr[NUM_HEADS]
66
66
 
67
- if KEY_DIM in mha_node.framework_attr:
68
- self.kdim = mha_node.framework_attr[KEY_DIM]
69
- else:
70
- self.kdim = False
67
+ self.kdim = mha_node.framework_attr[KEY_DIM]
71
68
 
72
- if VALUE_DIM in mha_node.framework_attr:
73
- self.vdim = mha_node.framework_attr[VALUE_DIM]
74
- else:
75
- self.vdim = False
69
+ self.vdim = mha_node.framework_attr[VALUE_DIM]
76
70
 
77
71
  self.qdim = int(self.embed_dim / self.num_heads)
78
72
 
@@ -708,7 +702,7 @@ class MultiHeadAttentionDecomposition(common.BaseSubstitution):
708
702
  """
709
703
 
710
704
  if mha_node.reuse:
711
- raise Exception("MCT doesn't support reuse of MultiHeadAttention layer")
705
+ raise Exception("MCT doesn't support reuse of MultiHeadAttention layer") # pragma: no cover
712
706
  params = MHAParams(mha_node)
713
707
 
714
708
  # project
@@ -14,10 +14,13 @@
14
14
  # ==============================================================================
15
15
  from torch import reshape
16
16
  import torch
17
+
18
+ from model_compression_toolkit.core.common import Logger
17
19
  from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
18
20
  from model_compression_toolkit.core import common
19
21
  from model_compression_toolkit.core.common.graph.base_graph import Graph
20
22
  from model_compression_toolkit.core.common.graph.base_node import BaseNode
23
+ from model_compression_toolkit.core.pytorch.constants import BATCH_DIM_VALUE
21
24
 
22
25
 
23
26
  class ReshapeWithStaticShapes(common.BaseSubstitution):
@@ -47,14 +50,25 @@ class ReshapeWithStaticShapes(common.BaseSubstitution):
47
50
  Returns:
48
51
  Graph after applying the substitution.
49
52
  """
53
+ # we want the batch size value to infer from the length of the array and remaining dimensions
54
+ if len(node.output_shape) == 1:
55
+ node.output_shape[0][0] = BATCH_DIM_VALUE
56
+ else:
57
+ Logger.error('Reshape or view nodes should have a single output shape') # pragma: no cover
58
+
50
59
  # configure the new static output shape attribute
51
60
  node.op_call_args = node.output_shape
52
61
 
53
62
  # modify the node input info
54
63
  node.input_shape = [node.input_shape[0]]
64
+
65
+ # the first input is the tensor to be reshaped, we want his batch size value to infer
66
+ # from the length of the array and remaining dimensions
67
+ node.input_shape[0][0] = BATCH_DIM_VALUE
68
+
55
69
  nodes_to_check = []
56
70
  for in_edge in graph.incoming_edges(node):
57
- if in_edge.sink_index > 0: # the first input is the tensor to be reshaped
71
+ if in_edge.sink_index > 0: # the first input is the tensor to be reshaped
58
72
  nodes_to_check.append(in_edge.source_node)
59
73
  graph.remove_edge(in_edge.source_node, node)
60
74
  for n in nodes_to_check:
@@ -80,4 +94,4 @@ def clean_graph_from_nodes_without_out_edges(graph: Graph,
80
94
  graph.remove_edge(in_edge.source_node, node)
81
95
  graph.remove_node(node)
82
96
  for n in nodes_to_check:
83
- clean_graph_from_nodes_without_out_edges(graph, n)
97
+ clean_graph_from_nodes_without_out_edges(graph, n)
@@ -12,3 +12,8 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
+
16
+ from model_compression_toolkit.exporter.model_exporter.keras.keras_export_facade import keras_export_model, KerasExportMode
17
+ from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import PyTorchExportMode, pytorch_export_model
18
+ from model_compression_toolkit.exporter.model_exporter.tflite.tflite_export_facade import tflite_export_model, TFLiteExportMode
19
+
@@ -13,6 +13,3 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.exporter.model_exporter.keras.keras_export_facade import keras_export_model, KerasExportMode
17
- from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import PyTorchExportMode, pytorch_export_model
18
- from model_compression_toolkit.exporter.model_exporter.tflite.tflite_export_facade import tflite_export_model, TFLiteExportMode
@@ -19,7 +19,7 @@ from typing import Callable
19
19
  import keras.models
20
20
  import tensorflow as tf
21
21
 
22
- from model_compression_toolkit import keras_load_quantized_model
22
+ from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model
23
23
  from model_compression_toolkit.core.common import Logger
24
24
  from model_compression_toolkit.exporter.model_exporter.keras.fakely_quant_keras_exporter import FakelyQuantKerasExporter
25
25
 
@@ -13,12 +13,8 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.core.common.constants import FOUND_TF, FOUND_TORCH
16
+ from model_compression_toolkit.exporter.model_wrapper.keras.validate_layer import is_keras_layer_exportable
17
+ from model_compression_toolkit.exporter.model_wrapper.keras.builder.fully_quantized_model_builder import get_exportable_keras_model
17
18
 
18
- if FOUND_TF:
19
- from model_compression_toolkit.exporter.model_wrapper.keras.validate_layer import is_keras_layer_exportable
20
- from model_compression_toolkit.exporter.model_wrapper.keras.builder.fully_quantized_model_builder import get_exportable_keras_model
21
-
22
- if FOUND_TORCH:
23
- from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
24
- from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
19
+ from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
20
+ from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model