mct-nightly 1.11.0.20240130.post401__py3-none-any.whl → 1.11.0.20240201.post434__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {mct_nightly-1.11.0.20240130.post401.dist-info → mct_nightly-1.11.0.20240201.post434.dist-info}/METADATA +1 -1
  2. {mct_nightly-1.11.0.20240130.post401.dist-info → mct_nightly-1.11.0.20240201.post434.dist-info}/RECORD +44 -44
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/constants.py +1 -0
  5. model_compression_toolkit/core/__init__.py +0 -1
  6. model_compression_toolkit/core/common/framework_info.py +1 -2
  7. model_compression_toolkit/core/common/graph/base_graph.py +1 -1
  8. model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +4 -2
  9. model_compression_toolkit/core/common/quantization/node_quantization_config.py +11 -6
  10. model_compression_toolkit/core/common/quantization/quantization_config.py +5 -13
  11. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +1 -1
  12. model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +40 -17
  13. model_compression_toolkit/core/common/substitutions/scale_equalization.py +1 -1
  14. model_compression_toolkit/core/keras/default_framework_info.py +1 -1
  15. model_compression_toolkit/core/pytorch/default_framework_info.py +2 -2
  16. model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +14 -7
  17. model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +4 -1
  18. model_compression_toolkit/gptq/common/gptq_config.py +1 -3
  19. model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +1 -1
  20. model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +1 -1
  21. model_compression_toolkit/target_platform_capabilities/constants.py +18 -1
  22. model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +1 -1
  23. model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +175 -33
  24. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +7 -3
  25. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +11 -1
  26. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +64 -18
  27. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +24 -8
  28. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +15 -2
  29. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +64 -20
  30. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +24 -7
  31. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +15 -2
  32. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +60 -18
  33. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +24 -7
  34. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +15 -2
  35. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +52 -11
  36. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +25 -8
  37. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +15 -2
  38. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +52 -13
  39. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +12 -1
  40. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +12 -1
  41. {mct_nightly-1.11.0.20240130.post401.dist-info → mct_nightly-1.11.0.20240201.post434.dist-info}/LICENSE.md +0 -0
  42. {mct_nightly-1.11.0.20240130.post401.dist-info → mct_nightly-1.11.0.20240201.post434.dist-info}/WHEEL +0 -0
  43. {mct_nightly-1.11.0.20240130.post401.dist-info → mct_nightly-1.11.0.20240201.post434.dist-info}/top_level.txt +0 -0
  44. /model_compression_toolkit/{core/common/defaultdict.py → defaultdict.py} +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 1.11.0.20240130.post401
+ Version: 1.11.0.20240201.post434
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,7 +1,8 @@
1
- model_compression_toolkit/__init__.py,sha256=RpajIMANR3W_WilffQTpf-uaNmxRJQ4EekMVJFLUWRM,3709
2
- model_compression_toolkit/constants.py,sha256=DDHbvFQDK04xFaWQOB1PJtV17pk-vKighwwXSnGrGpk,4103
1
+ model_compression_toolkit/__init__.py,sha256=WXRBerevhP4sQ4NIHd-tcHcMEan7Qx_Wz1sTtF-HuQc,3697
2
+ model_compression_toolkit/constants.py,sha256=5aystyH4YQv3J9X3Xx3eQvnfFBpo1NDju8jwfqH4z2A,4131
3
+ model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
3
4
  model_compression_toolkit/logger.py,sha256=b9DVktZ-LymFcRxv2aL_sdiE6S2sSrFGWltx6dgEuUY,4863
4
- model_compression_toolkit/core/__init__.py,sha256=qnBA6aaojI7RpEQZU2vXWiELHfVJf-MnAP-4T0tcFDY,2008
5
+ model_compression_toolkit/core/__init__.py,sha256=pRP8FZ_46vpd6MVrcec5O5wnoByQqRzq_tMzaDRiMmM,1934
5
6
  model_compression_toolkit/core/analyzer.py,sha256=dbsD61pakp_9JXNyAScLdtJvcXny9jr_cMbET0Bd3Sg,2975
6
7
  model_compression_toolkit/core/exporter.py,sha256=U_-ea-zYHsnIt2ydameMLZ_gzDaCMI1dRa5IjA8RUuc,4233
7
8
  model_compression_toolkit/core/graph_prep_runner.py,sha256=3xp0WYqyeRdlBkf5R6uD2zWubg_JPttOwS7JRhKykBY,10043
@@ -10,9 +11,8 @@ model_compression_toolkit/core/runner.py,sha256=RgN9l0v7aFYu6MTuIZGAB2syr6NBqG_v
10
11
  model_compression_toolkit/core/common/__init__.py,sha256=Wh127PbXcETZX_d1PQqZ71ETK3J9XO5A-HpadGUbj6o,1447
11
12
  model_compression_toolkit/core/common/base_substitutions.py,sha256=xDFSmVVs_iFSZfajytI0cuQaNRNcwHX3uqOoHgVUvxQ,1666
12
13
  model_compression_toolkit/core/common/data_loader.py,sha256=7YF5Mqz64Xb4rVwY3knrdIZ4JEHybXxiQqx0deR_c5k,4017
13
- model_compression_toolkit/core/common/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
14
14
  model_compression_toolkit/core/common/framework_implementation.py,sha256=XzRiDWi-pXfR8MhTbC3vi2Uaa6pC4BeS0YnyWpqNkNU,21220
15
- model_compression_toolkit/core/common/framework_info.py,sha256=hwmstv7IuBRfa6IxDbeG4y-7AxKx4bwCyI_Exi2C7mo,6424
15
+ model_compression_toolkit/core/common/framework_info.py,sha256=1ZMMGS9ip-kSflqkartyNRt9aQ5ub1WepuTRcTy-YSQ,6337
16
16
  model_compression_toolkit/core/common/memory_computation.py,sha256=ixoSpV5ZYZGyzhre3kQcvR2sNA8KBsPZ3lgbkDnw9Cs,1205
17
17
  model_compression_toolkit/core/common/model_builder_mode.py,sha256=jll9-59OPaE3ug7Y9-lLyV99_FoNHxkGZMgcm0Vkpss,1324
18
18
  model_compression_toolkit/core/common/model_collector.py,sha256=pNmJsU7QPCQ8-YUrzz__85YwF7Mk4Q27gozDSYCpzrg,5005
@@ -32,7 +32,7 @@ model_compression_toolkit/core/common/collectors/statistics_collector_generator.
32
32
  model_compression_toolkit/core/common/fusion/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
33
33
  model_compression_toolkit/core/common/fusion/layer_fusing.py,sha256=tIsWFYc771o59uvq5fxAaBmOCnd_gd-_xMbQI9SupQA,5479
34
34
  model_compression_toolkit/core/common/graph/__init__.py,sha256=Xr-Lt_qXMdrCnnOaUS_OJP_3iTTGfPCLf8_vSrQgCs0,773
35
- model_compression_toolkit/core/common/graph/base_graph.py,sha256=-1e0EmopZH3fFSKB8vvmMrK39Vy_5hXwi_tZVxSBPZ0,36484
35
+ model_compression_toolkit/core/common/graph/base_graph.py,sha256=GrkXtTl2UxGMbdpucBH_S63g-RJlyV7rXGtGmQJPFoQ,36511
36
36
  model_compression_toolkit/core/common/graph/base_node.py,sha256=Lq26s4W4ybA7a83Os74Lxl96c_6Oe-M8xuPG93-Qyzc,22042
37
37
  model_compression_toolkit/core/common/graph/edge.py,sha256=K6Wc2hBcIqig5PbbLhbjtTgYtkyZEohfgj4Wn_J5yEA,3733
38
38
  model_compression_toolkit/core/common/graph/functional_node.py,sha256=0TpYNa2ODZ0M9lQ2z_GsStqAbrg1Muwdni74LjphAh0,2922
@@ -99,18 +99,18 @@ model_compression_toolkit/core/common/pruning/mask/__init__.py,sha256=huHoBUcKNB
99
99
  model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py,sha256=hkm8xU4o9LvFeCc_KRg7PGYd_eQa6Kbjx-rGHvgajnA,5054
100
100
  model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py,sha256=gmzD32xsfJH8vkkqaspS7vYa6VWayk1GJe-NfoAEugQ,5901
101
101
  model_compression_toolkit/core/common/quantization/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
102
- model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py,sha256=lC6XIcIeyAcbi_sgfXPtmpAwuL95gy5pNsAt4fwm3U4,3022
102
+ model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py,sha256=2iu35iI5gnWLHBKSaLVsPQWr1ssly6Z-gbaNCauvcQM,3223
103
103
  model_compression_toolkit/core/common/quantization/core_config.py,sha256=8DRM4Ar4Er-bllo56LG-Lcx9U2Ebd3jJctf4t2hOcXc,2021
104
104
  model_compression_toolkit/core/common/quantization/debug_config.py,sha256=HtkMmneN-EmAzgZK4Vp4M8Sqm5QKdrvNyyZMpaVqYzY,1482
105
105
  model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py,sha256=4GCr4Z6pRMbxIAnq4s7YtdMSqwbRwUzTzCFfs2ahVfk,6137
106
- model_compression_toolkit/core/common/quantization/node_quantization_config.py,sha256=_M26CiatGaaThBbqfweK_Q_CoixUxs2R64T0Dbxz8Os,16405
106
+ model_compression_toolkit/core/common/quantization/node_quantization_config.py,sha256=Q634XzMtjqReiLni8974y13apzbZ9nref-XBGjH17-0,16761
107
107
  model_compression_toolkit/core/common/quantization/quantization_analyzer.py,sha256=crap6d2_v97V_2VhkQr17nhUquglcxoVmi29EwF1zao,3462
108
- model_compression_toolkit/core/common/quantization/quantization_config.py,sha256=0S4hWygAGC7jMYCQz00itqrZMM-vaWV_Csc7m6-yLWo,7247
108
+ model_compression_toolkit/core/common/quantization/quantization_config.py,sha256=0547euhiaEX4vgpvIwHd7-pZ3iI7QmIc6Y_qHV4Y5sY,6713
109
109
  model_compression_toolkit/core/common/quantization/quantization_fn_selection.py,sha256=9GWuVMW9ifWnflUjLZoHZtcd8NpHyHTUzCdJFKIaGlo,2352
110
110
  model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py,sha256=sEPDeClFxh0uHEGznX7E3bSOJ_t0kUvyWcdxcyZJdwA,4090
111
111
  model_compression_toolkit/core/common/quantization/quantize_graph_weights.py,sha256=_OQEFAdYDTHu2Qp-qs02Z1CDxugUKG6k5eCePS1WpXY,2939
112
112
  model_compression_toolkit/core/common/quantization/quantize_node.py,sha256=UK_YshvZI0-LrKeT9gFGYcMA7pma1kaR5JAfzJH3HNw,3614
113
- model_compression_toolkit/core/common/quantization/set_node_quantization_config.py,sha256=KuYd3fHdTKK8Pg1hLw8zB1CpexyltJOpQMcKMvLJmB8,10683
113
+ model_compression_toolkit/core/common/quantization/set_node_quantization_config.py,sha256=3Z5XiR6bESBrPPkvq9JetndiJ-R7hwXNQgMtxo_P2mc,12302
114
114
  model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py,sha256=_U4IFPuzGyyAymjDjsPl2NF6UbFggqBaiA1Td3sug3I,1608
115
115
  model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py,sha256=rwCedE0zggamSBY50rqh-xqZpIMrn8o96YH_jMCuPrk,16505
116
116
  model_compression_toolkit/core/common/quantization/quantization_params_generation/kmeans_params.py,sha256=qDfJbvY64KLOG6n18ddEPTFGrKHlaXzZ136TrVpgH9s,2917
@@ -120,7 +120,7 @@ model_compression_toolkit/core/common/quantization/quantization_params_generatio
120
120
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py,sha256=noEdvGiyyW7acgQ2OFWLedCODibTGYJifC9qo8YIU5U,4558
121
121
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py,sha256=MR17AECmtQSlmD3emYDM7FYQdObnfl9x_bg3NSIh3fY,4628
122
122
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py,sha256=3gUOfFRxJBC1AIXa6j1O4Y7DLuDZrygbxOsVyetYzuw,41685
123
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py,sha256=s2m09heGcj7DyHpFDyogAXrvk06ZA3Z8hEq41iRNaBg,5092
123
+ model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py,sha256=0uYqipl8TQVWX6lzQ1prHX2TpdjBZd4AU_MxjvG0g4k,5080
124
124
  model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py,sha256=53OFL3IZxtH-UPakf3h_LZkaZAa3cgc2oqgMUe3Sg8o,9689
125
125
  model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py,sha256=oiJn1twYpTaq_z5qX4d8_nnk_jouYWHq8z0WAknl5oE,7879
126
126
  model_compression_toolkit/core/common/quantization/quantizers/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
@@ -141,7 +141,7 @@ model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py,sha256
141
141
  model_compression_toolkit/core/common/substitutions/linear_collapsing.py,sha256=iEtzbWCDXP6EDkTZCtREQ0rpMxhQ2kM9zlcP_0KLq9I,12367
142
142
  model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py,sha256=uoauhmncQqUBNvD-qCLIXsIbl_IzrbxSKdxiMig-5W4,2406
143
143
  model_compression_toolkit/core/common/substitutions/residual_collapsing.py,sha256=doErjlMq-uSObYMSjA6IywSHb3Hz3QCc0HKU68ccrQ4,4767
144
- model_compression_toolkit/core/common/substitutions/scale_equalization.py,sha256=nmb5QC_YiQJRbsEIq6uF50y1IRWhmRAUKaeUE9hnoNw,10978
144
+ model_compression_toolkit/core/common/substitutions/scale_equalization.py,sha256=p57u25qdW2pimxzGwgMXEBV4S-LzXuTVAlIM7830WfU,10966
145
145
  model_compression_toolkit/core/common/substitutions/shift_negative_activation.py,sha256=QbT6LMt4Eit4i1bLGIizHhE6R_tLeJf2Ix2qVod2bcw,28749
146
146
  model_compression_toolkit/core/common/substitutions/softmax_shift.py,sha256=R-0ZqhYAuZLEFWHvB2UTPm52L6gWHGdRdEnwGxKSeGI,2625
147
147
  model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py,sha256=s-uLbm6sG9xbEMTYxJwTkmOJI_MlY54S80YVTpbdI1Q,3400
@@ -153,7 +153,7 @@ model_compression_toolkit/core/common/visualization/tensorboard_writer.py,sha256
153
153
  model_compression_toolkit/core/keras/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
154
154
  model_compression_toolkit/core/keras/constants.py,sha256=YhuzRqXAdkRFzLT5lRD_jtLVYcUb-d4fUm-D49z5XOg,3158
155
155
  model_compression_toolkit/core/keras/custom_layer_validation.py,sha256=f-b14wuiIgitBe7d0MmofYhDCTO3IhwJgwrh-Hq_t_U,1192
156
- model_compression_toolkit/core/keras/default_framework_info.py,sha256=REYNaByat2QImKv1P1Ag2H5xtmu2AbwlPCaNj9eMWgw,4973
156
+ model_compression_toolkit/core/keras/default_framework_info.py,sha256=Ha4HTHuiw_KTS5Po1Xnv6GyK9eprpDhYWf-eooS62Ys,4961
157
157
  model_compression_toolkit/core/keras/keras_implementation.py,sha256=zJw9NdlQLj8a6ZWMCdLcYwHNPbpvo3V_kG3v7BUU-NY,29061
158
158
  model_compression_toolkit/core/keras/keras_model_validation.py,sha256=1wNV2clFdC9BzIELRLSO2uKf0xqjLqlkTJudwtCeaJk,1722
159
159
  model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=Aqh31wOPaiZcJIOm-uJwzev0eTMdJyXaOk97rs4z7BU,3879
@@ -214,7 +214,7 @@ model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_c
214
214
  model_compression_toolkit/core/keras/visualization/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
215
215
  model_compression_toolkit/core/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
216
216
  model_compression_toolkit/core/pytorch/constants.py,sha256=3YNrwYhjsxFUy-kytqXJ7c3-v0Zre18B22U9T2RTd5I,2528
217
- model_compression_toolkit/core/pytorch/default_framework_info.py,sha256=qee8TFcDro2lfyXe_fujjX2OlxELTyKSsLlZ7QkzeXU,4200
217
+ model_compression_toolkit/core/pytorch/default_framework_info.py,sha256=r1XyzUFvrjGcJHQM5ETLsMZIG2yHCr9HMjqf0ti9inw,4175
218
218
  model_compression_toolkit/core/pytorch/kpi_data_facade.py,sha256=J0IDOtFMVFSFyBXDzNGbwJfHu89iRBJFdid1_wFB-xQ,8482
219
219
  model_compression_toolkit/core/pytorch/pytorch_device_config.py,sha256=IoMvTch5awAEPvB6Tg6ANhFGXvfSgv7JLsUBlxpMwk4,4330
220
220
  model_compression_toolkit/core/pytorch/pytorch_implementation.py,sha256=m55qXvxV9Kw6_I4hahFnD5km2tpegvcQoj9IbuMIKVE,26973
@@ -307,14 +307,14 @@ model_compression_toolkit/exporter/model_exporter/keras/__init__.py,sha256=uZ2Ri
307
307
  model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py,sha256=-wr2n0yRlmFixXBeZuxg6Rzlvz-ZFUX-PJgSXhgMrEo,1593
308
308
  model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py,sha256=v_-rOsWDFI-3k8CoJIr-XzT7ny8WXpAMteWRWtTzaeg,963
309
309
  model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py,sha256=E_1IqFYAGUMOrt3U_JK1k--8D0WzWPbjZH_IRLGw_wY,11478
310
- model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=lwwbEAmPmlU89DXmBjmDT6Yk1W6ss96LQ52oAHxvtFw,3209
310
+ model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=sqzqQ8US24WgDbg_FoP1NQBgqCbSVwrVTWrxcyY0nPA,3514
311
311
  model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py,sha256=nGtpDTeH5Tdp7sjyuXsy_9TPpijDYp4nkz366DUUJ0Q,8048
312
312
  model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py,sha256=O-GApieS7_zLkpygnN0YvDK-HkCChwA4bSExbI5jvQ8,5998
313
313
  model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py,sha256=qXXkv3X_wb7t622EOHwXIxfGLGaDqh0T0y4UxREi4Bo,1976
314
314
  model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
315
315
  model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py,sha256=UPVkEUQCMZ4Lld6CRnEOPEmlfe5vcQZG0Q3FwRBodD4,4021
316
316
  model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py,sha256=bPevy6OBqng41PqytBR55e6cBEuyrUS0H8dWX4zgjQ4,967
317
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=qYteysaJzMhm5OgK2SBn4zL8Avx7obD-vk8CkS2ppoU,5094
317
+ model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py,sha256=HtVt0EPogBg6p-N4A0JDNx6fDZtbEtjlWBgr691-fwU,5268
318
318
  model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py,sha256=ksWV2A-Njo-wAxQ_Ye2sLIZXBWJ_WNyjT7-qFFwvV2o,2897
319
319
  model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py,sha256=ULaFuY0flk7zStUIEJrpya6jcOHA0-8Y9qkmHRyNYK8,5998
320
320
  model_compression_toolkit/exporter/model_wrapper/__init__.py,sha256=7CF2zvpTrIEm8qnbuHnLZyTZkwBBxV24V8QA0oxGbh0,1187
@@ -333,7 +333,7 @@ model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantiz
333
333
  model_compression_toolkit/gptq/__init__.py,sha256=YDtDmPgp20Lp4_CrjodyEV0LV7XVfGqD09xvQwaQeG8,1275
334
334
  model_compression_toolkit/gptq/runner.py,sha256=Y30pzsCI9g85BqrEl8-RdcDPyXpo7eIljFv7CG7uQps,6117
335
335
  model_compression_toolkit/gptq/common/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
336
- model_compression_toolkit/gptq/common/gptq_config.py,sha256=uCb3I47S3FoFLfTwZjW3EQUW6HYaZK6Am0-CT5GjIx0,9554
336
+ model_compression_toolkit/gptq/common/gptq_config.py,sha256=0wqRQVISQEOnXo0maeGB-ECZ-QD7kQUjaUpmj_hSEm4,9391
337
337
  model_compression_toolkit/gptq/common/gptq_constants.py,sha256=QSm6laLkIV0LYmU0BLtmKp3Fi3SqDfbncFQWOGA1cGU,611
338
338
  model_compression_toolkit/gptq/common/gptq_framework_implementation.py,sha256=n3mSf4J92kFjekzyGyrJULylI-8Jf5OVWJ5AFoVnEx0,1266
339
339
  model_compression_toolkit/gptq/common/gptq_graph.py,sha256=LfxpkMJb87h1NF1q4HoC88wA_0MW-B820alpiuZpZFo,2826
@@ -354,7 +354,7 @@ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.
354
354
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=BBSDWLmeywjSM5N6oJkMgcuo7zrXTesB4zLwRGG8QB0,12159
355
355
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=pyhlVpoauHM-zuixHsIGPHFgQoXppL8TlDFCjPE2RuY,10377
356
356
  model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
357
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=nI1jxpNEaCxSuwEd29Oobr-p8htKMj5wYcg52aKzDOQ,8368
357
+ model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=pgZADwaNWUwm9QTrYaW6yXE3-zfedPZSa9TKBVedNd4,8356
358
358
  model_compression_toolkit/gptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
359
359
  model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=kDuWw-6zh17wZpYWh4Xa94rpoodf82DksgjQCnL7nBc,2719
360
360
  model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py,sha256=tECPTavxn8EEwgLaP2zvxdJH6Vg9jC0YOIMJ7857Sdc,1268
@@ -371,7 +371,7 @@ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_re
371
371
  model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=kLVQC1hXzDpP4Jx7AwnA764oGnY5AMEuvUUhAvhz09M,12347
372
372
  model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=FgPSKoV8p8y-gLNz359XdOPD6w_wpDvcJFtTNLWqYb0,9099
373
373
  model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
374
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=vJHjrMNEe3Yh7NrPpUS81UMcMTW8dHwoHfqnQgwqbHM,8780
374
+ model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=6uxq_w62jn8DDOt9T7VtA6jZ8jTAPcbTufKFOYpVUm4,8768
375
375
  model_compression_toolkit/legacy/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
376
376
  model_compression_toolkit/legacy/keras_quantization_facade.py,sha256=2pNJoc1mKMbikBS_uebLgFAbTqfA0y9ofDUNCVogSKI,18444
377
377
  model_compression_toolkit/legacy/pytorch_quantization_facade.py,sha256=p-ZGKdGeRIJsR5XmFYgjs3VN49NrwHumNtTY2OSDW-4,17874
@@ -412,12 +412,12 @@ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py,sha256=
412
412
  model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=4xmLmg7yN2A7iKnifwkWddgJTWMUiIjFilIuorJeK1A,9657
413
413
  model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py,sha256=HshW016iVAMx7iMkUwlONN2P3K4XgDIu-2AnJnBVSGo,8778
414
414
  model_compression_toolkit/target_platform_capabilities/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
415
- model_compression_toolkit/target_platform_capabilities/constants.py,sha256=mYUESjXeN7EPomZKgqHZE031KNOO4wmFdRB8gA4m50U,920
415
+ model_compression_toolkit/target_platform_capabilities/constants.py,sha256=iJXGy5um7vhC84Me4ld6EHMhy7jPks0T9ItZX23si6s,1519
416
416
  model_compression_toolkit/target_platform_capabilities/immutable.py,sha256=rSPd3Xpx4Rzt6-EwDA9tXvKygrrt4xvmq01JVCXY0hQ,1723
417
- model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py,sha256=_LzyDupsTDiJvIsVA-L-M_fRrW8ePcul8mr60L8DW9g,1574
417
+ model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py,sha256=s0UAemzT_AyPbs-XWUi5VA3TpxQU-nn1_1WCyebuvsc,1603
418
418
  model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py,sha256=5Bu5MkOYYDGzZgTu-PBQ4xVCnso1mtssc9zz1pZjl7o,2010
419
419
  model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py,sha256=NIKUE2AtRv4CFOhpwjVvfG3rLfvd6p7DYBSuK0SKo4s,2353
420
- model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py,sha256=XqJYM0od8SyjQdkSW1Qlf8_9EDYDaeOZgBuCzhIe-y8,8879
420
+ model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py,sha256=H2M0HqrDp1kKkJylM3PSZ1lyY0zVcvO9N3HEdji8QnQ,14351
421
421
  model_compression_toolkit/target_platform_capabilities/target_platform/operators.py,sha256=rRmrmPBY4rxCWVpEc6FxeOPUFh8MkfwgQsqD82U9a7w,3108
422
422
  model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py,sha256=rEC9VsWR14GPtCCgnh1YlXCbGWJfdk3hQ2c3juuLmbo,9309
423
423
  model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py,sha256=TDbNQwmF7Id-FoIQZlR7ZOcz_nRb4XKBmDihAgKT0u8,1392
@@ -425,8 +425,8 @@ model_compression_toolkit/target_platform_capabilities/target_platform/targetpla
425
425
  model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py,sha256=-riVk2KPy94nYuviaZzZPc6j5vObhD9-6fGryuSLZ9c,8759
426
426
  model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py,sha256=GcLSXZLxtcE9SxSKdlvo10ba9mqVk_MBiwrvvjSH8H0,2046
427
427
  model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py,sha256=Cl6-mACpje2jM8RJkibbqE3hvTkFR3r26-lW021mIiA,4019
428
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py,sha256=6wYAB0w_5IRXUuNi3ID98icXEaY9ifpjD-GtVVSWtSQ,6375
429
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py,sha256=ehlHPm0cqQtXo-qJ8-oTKeDPZJFikj2164nMoQuzfs0,9176
428
+ model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py,sha256=HpJ_zzYHpSMbJ5K-IDhmP-8mwCYconaK17NSIJ3R6iI,6743
429
+ model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py,sha256=m6p9pO_xqGcp-0jAVRaOJww67oSQ6gChCD45_W833Gw,9819
430
430
  model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py,sha256=FvrYI0Qy7DCmDp2gyUYyCZq5pY84JgLtJqSIiVTJ8Ss,1030
431
431
  model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
432
432
  model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py,sha256=aHoAu5Iye9YVn2HLwNb4X9cUDX1WJt20R5GsNGIAk9E,3337
@@ -434,31 +434,31 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__i
434
434
  model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py,sha256=1O6q93V268cqUBq2tACRP8ZMk2hm7QjmDmrPubDW-jU,2959
435
435
  model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py,sha256=F5RG4MnuAwKcNXbfVbPFLQu30-lNax-7knqu20B6udQ,1522
436
436
  model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py,sha256=1mMOREEMoNHu_KTMGDp4crN61opKWX6aFn1DrDLvqcc,717
437
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py,sha256=4UTL7txzlY5gcfZlNtfZO_Gd5a5v77yETz2lvHSLWrc,8118
438
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py,sha256=aTmLEha4ZHqq2P9gQQP68KeuPbD6Hb-nPwqG0CvDSy4,5185
439
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py,sha256=ao5TI3eg97YQWbzqsnjyU_CbstKxapYiqZFcayQl68U,4789
437
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py,sha256=S-GwMI-JiuPpbtOdd6TSOEjiUFiIs6M2RAiJNJ3O950,10883
438
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py,sha256=bPBWxopMUHFgiaJjaAfoompwShvfH2wHAouN56PQn0A,6484
439
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py,sha256=hm1VAc9W3bKf57BacRGI5ty61WZfZ29Bt7eLRlwH3xc,5661
440
440
  model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py,sha256=vFDyiMymNZSRCdTgAyWn4A-tZD3vzze_PTLBSF2OYe8,721
441
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py,sha256=ifvY_i70WPpba6mJpGtmD2UQ04csN_wbSKUC-77MEJA,8366
442
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py,sha256=2LT1u7D9N8dJurQx20P-yUQNTTs195AxoPav0aLIcFk,5193
443
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py,sha256=cjv2lWUrcypKBgPs3CmnSZ8mMitdpDsO4XoKM7_vBbU,4797
441
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py,sha256=i9pRGgMdsY8pzV1Dj0Qzg4mcSyIokQzm0lbLzLhkVaM,10616
442
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py,sha256=bU74t-ZIkIptXuNaPI_YIC5w9TX6nDgJUpJwxHAPOSI,6493
443
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py,sha256=u8HW__YoUpM_ezxDZjlP3tjMcIZyWfFXrUwbNq_YSG8,5669
444
444
  model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py,sha256=NUuczImqUxzdfflqSdqkeAN8aCU6Tuiu6U0Fnj9Tzmw,721
445
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py,sha256=1iZgQ5zyrXDJFkUatxIpuTWA8Yfokyy2tCu9lQRPCSc,8062
446
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py,sha256=N1agnOoW0lRm7QjH2LRfkjOw6PhbHIHEK1ftoy8VjKM,5205
447
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py,sha256=9bK0iVuGslFjVtERokOI9cZ-jbe-Q8xkiyUlTp8_ZA4,4816
445
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py,sha256=o1KloA8WPw1MbtZ-4p-kxQuroBAL67z77dPpliZyH9o,10369
446
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py,sha256=NkAGCZbSgXYeRAiJRzt19h2cxkrVQJaHu8-2jHZLOYg,6505
447
+ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py,sha256=kfAPuvMpFrxuszKrLYNsj_P4mxK3mfb1TXBIlXMQIyA,5688
448
448
  model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
449
449
  model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py,sha256=lnhJcwvTF0t7ybeiTleIS1p0aD8xzFZxVPx4ISk5uWQ,2090
450
450
  model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py,sha256=UUvUCcTots_sehdRnDfgkaE8WPQ7dPbeuhDF4Qy2nzw,1510
451
451
  model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py,sha256=t4JKsPcor-7KSCKzIwuaBv0NLNwfhuewAQGlDl6iBeo,717
452
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py,sha256=cfFGsXksaMeXZf-wqU8FsA-zrCWnWMVuOkrt-JrKawE,5970
453
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py,sha256=rIhqfc_Ks1SKzk5ppfomP88fGhx3Oh2w498dSW3p-ZY,3183
454
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py,sha256=o5w_ylMr7KCJCbSP1jx8Qs8HFysEEg4e_aRM_LXlexM,2942
452
+ model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py,sha256=InHTtUwHxh8KeC430SX9gP17hUvXJ-REGCXIUCQBVhI,8106
453
+ model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py,sha256=h_hePXCggG2qktLuoNAOE1XNtc0qEwMyky7om1c8eC8,4483
454
+ model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py,sha256=65WJPRCjliXEUL4AjZRxcyVS3y7KHTMDdkqy6D95kRw,3814
455
455
  model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
456
456
  model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py,sha256=C42RJBQERjjkqFa1pRsHZXseofceOtrp_fJxs4wqWvo,2085
457
457
  model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py,sha256=sK9PnyB2R9g0rqHr_9vyUFX7wSyrZe7x9yqYUlbaiqo,1505
458
458
  model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py,sha256=t4JKsPcor-7KSCKzIwuaBv0NLNwfhuewAQGlDl6iBeo,717
459
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py,sha256=LyNBwwH3q7GXllwUMuk4dDcg_-1w3K-NbwClRIJomag,7830
460
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py,sha256=KeRMMdcqWvZ2MACbYe2Lg_M_RJ-VZkKWHni-GcDZBNs,5920
461
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py,sha256=KH_kdxyX9pPeLqkIj9_E59ZhAIFZ8bll0sfwDuILVdU,5068
459
+ model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py,sha256=PSoSEgn2Gprc3TpLyl2BCcn7tYW0KrFN-CXM7l8dwOo,9864
460
+ model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py,sha256=-4vNf2Q6c_rgaac19AFO8hG4ANaPfgNPf0kN44mL6TQ,6830
461
+ model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py,sha256=YVJJvqGPBdkKnug99p9bjqtbfecDXZKIB2iWVCe7RUY,5960
462
462
  model_compression_toolkit/trainable_infrastructure/__init__.py,sha256=DwWh0lXiLNNzqHHNEy-Py6_5OtseNGJDGNV3SYm8rYQ,1224
463
463
  model_compression_toolkit/trainable_infrastructure/common/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
464
464
  model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py,sha256=ON5X6a4p46ofXzRcXyIgOGSgO7JXG85frE9vTjOZu2o,7564
@@ -475,8 +475,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
475
475
  model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
476
476
  model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
477
477
  model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=SbvRlIdE32PEBsINt1bhSqvrKL_zbM9V-aeSkOn-sw4,3083
478
- mct_nightly-1.11.0.20240130.post401.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
479
- mct_nightly-1.11.0.20240130.post401.dist-info/METADATA,sha256=TMu5ThCW0w_pOpXpbeGoZVzQenzLaAvo3PGgxDM28yA,17187
480
- mct_nightly-1.11.0.20240130.post401.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
481
- mct_nightly-1.11.0.20240130.post401.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
482
- mct_nightly-1.11.0.20240130.post401.dist-info/RECORD,,
478
+ mct_nightly-1.11.0.20240201.post434.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
479
+ mct_nightly-1.11.0.20240201.post434.dist-info/METADATA,sha256=0Tl_BcNaR7t_Nlfm78_fWvLEXo9yn3lOEJnNOim4XX8,17187
480
+ mct_nightly-1.11.0.20240201.post434.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
481
+ mct_nightly-1.11.0.20240201.post434.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
482
+ mct_nightly-1.11.0.20240201.post434.dist-info/RECORD,,
@@ -13,6 +13,7 @@
  # limitations under the License.
  # ==============================================================================

+ from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.target_platform_capabilities import target_platform
  from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities
  from model_compression_toolkit import core
@@ -40,7 +41,6 @@ from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quant
  from model_compression_toolkit.logger import set_log_folder
  from model_compression_toolkit.core.common.data_loader import FolderImageLoader
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
- from model_compression_toolkit.core.common.defaultdict import DefaultDict
  from model_compression_toolkit.legacy.keras_quantization_facade import keras_post_training_quantization, keras_post_training_quantization_mixed_precision
  from model_compression_toolkit.legacy.pytorch_quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
  from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data
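Together with entry 44 in the file list (core/common/defaultdict.py moved to defaultdict.py), these two hunks relocate DefaultDict to the package root. A minimal sketch of the import change for code that previously pulled it from core.common; only the import path changes, the class itself is untouched in this diff:

    # Old location (1.11.0.20240130.post401):
    # from model_compression_toolkit.core.common.defaultdict import DefaultDict

    # New location (1.11.0.20240201.post434), matching the import added above:
    from model_compression_toolkit.defaultdict import DefaultDict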
@@ -114,6 +114,7 @@ WEIGHTS_QUANTIZATION_FN = 'weights_quantization_fn'
  ACTIVATION_QUANT_PARAMS_FN = 'activation_quantization_params_fn'
  WEIGHTS_QUANT_PARAMS_FN = 'weights_quantization_params_fn'
  WEIGHTS_CHANNELS_AXIS = 'weights_channels_axis'
+ WEIGHTS_CFG = 'weights_cfg'

  # Memory graph constants
  DUMMY_NODE = 'dummy_node'
@@ -15,7 +15,6 @@

  from model_compression_toolkit.core.common.data_loader import FolderImageLoader
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
- from model_compression_toolkit.core.common.defaultdict import DefaultDict
  from model_compression_toolkit.core.common import network_editors as network_editor
  from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
  from model_compression_toolkit.core.common.quantization import quantization_config
@@ -20,8 +20,7 @@ from typing import Dict, Any, List



- from model_compression_toolkit.core.common.defaultdict import DefaultDict
- from model_compression_toolkit.core.common.graph.base_node import BaseNode
+ from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod


@@ -104,7 +104,7 @@ class Graph(nx.MultiDiGraph, GraphSearches):
  if not is_node_in_tpc:
  Logger.error(f'MCT does not support optimizing Keras custom layers, but found layer of type {n.type}. '
  f'Please add the custom layer to TPC or file a feature request or an issue if you believe this is an issue.')
- if any([qc.enable_weights_quantization for qc in n.get_qco(tpc).quantization_config_list]):
+ if any([qc.default_weight_attr_config.enable_weights_quantization for qc in n.get_qco(tpc).quantization_config_list]):
  Logger.error(f'MCT does not support optimizing Keras custom layers with weights quantization. Layer: {n.type}')

  self.tpc = tpc
@@ -14,9 +14,10 @@
  # ==============================================================================
  from model_compression_toolkit.constants import ACTIVATION_QUANTIZATION_CFG, WEIGHTS_QUANTIZATION_CFG, QC, \
  OP_CFG, ACTIVATION_QUANTIZATION_FN, WEIGHTS_QUANTIZATION_FN, ACTIVATION_QUANT_PARAMS_FN, WEIGHTS_QUANT_PARAMS_FN, \
- WEIGHTS_CHANNELS_AXIS
+ WEIGHTS_CHANNELS_AXIS, WEIGHTS_CFG
  from model_compression_toolkit.core.common.quantization.node_quantization_config import BaseNodeQuantizationConfig, \
  NodeWeightsQuantizationConfig, NodeActivationQuantizationConfig
+ from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR


  ##########################################
@@ -47,4 +48,5 @@ class CandidateNodeQuantizationConfig(BaseNodeQuantizationConfig):
  kwargs.get(OP_CFG),
  kwargs.get(WEIGHTS_QUANTIZATION_FN),
  kwargs.get(WEIGHTS_QUANT_PARAMS_FN),
- kwargs.get(WEIGHTS_CHANNELS_AXIS))
+ kwargs.get(WEIGHTS_CHANNELS_AXIS),
+ kwargs.get(WEIGHTS_CFG))
@@ -24,7 +24,8 @@ from model_compression_toolkit.core.common.quantization.quantization_params_fn_s

  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, \
  QuantizationErrorMethod
- from model_compression_toolkit.target_platform_capabilities.target_platform import OpQuantizationConfig
+ from model_compression_toolkit.target_platform_capabilities.target_platform import OpQuantizationConfig, \
+ AttributeQuantizationConfig


  ##########################################
@@ -236,7 +237,8 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
  op_cfg: OpQuantizationConfig,
  weights_quantization_fn: Callable,
  weights_quantization_params_fn: Callable,
- weights_channels_axis: int):
+ weights_channels_axis: int,
+ weights_cfg: AttributeQuantizationConfig):
  """

  Args:
@@ -245,19 +247,22 @@
  weights_quantization_fn: Function to use when quantizing the node's weights.
  weights_quantization_params_fn: Function to use when computing the threshold for quantizing a node's weights.
  weights_channels_axis: Axis to quantize a node's kernel when quantizing per-channel.
+ weights_cfg: Weights attribute quantization config.
  """

+ # TODO: after refactoring to enable attributes quantization, all weights quantization arguments
+ # should be taken per attribute, and not from the weights config
  self.weights_quantization_fn = weights_quantization_fn
  self.weights_quantization_params_fn = weights_quantization_params_fn
  self.weights_channels_axis = weights_channels_axis
  self.weights_quantization_params = {}
- self.weights_quantization_method = op_cfg.weights_quantization_method
+ self.weights_quantization_method = weights_cfg.weights_quantization_method
  self.weights_error_method = qc.weights_error_method
- self.weights_n_bits = op_cfg.weights_n_bits
+ self.weights_n_bits = weights_cfg.weights_n_bits
  self.weights_bias_correction = qc.weights_bias_correction
  self.weights_second_moment_correction = qc.weights_second_moment_correction
- self.weights_per_channel_threshold = op_cfg.weights_per_channel_threshold
- self.enable_weights_quantization = op_cfg.enable_weights_quantization
+ self.weights_per_channel_threshold = weights_cfg.weights_per_channel_threshold
+ self.enable_weights_quantization = weights_cfg.enable_weights_quantization
  self.min_threshold = qc.min_threshold
  self.l_p_value = qc.l_p_value
  self.simd_size = op_cfg.simd_size
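To make the direction of this refactor concrete, the sketch below shows the four values that NodeWeightsQuantizationConfig now reads from the per-attribute weights_cfg instead of from op_cfg. The AttributeQuantizationConfig keyword names are an assumption inferred only from the fields this hunk accesses; the real constructor signature is not shown in this diff:

    from model_compression_toolkit.target_platform_capabilities.target_platform import AttributeQuantizationConfig
    from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod

    # Hypothetical kernel-attribute config; only the four fields read by the new
    # __init__ above are shown, with assumed keyword names.
    kernel_cfg = AttributeQuantizationConfig(weights_quantization_method=QuantizationMethod.SYMMETRIC,
                                             weights_n_bits=8,
                                             weights_per_channel_threshold=True,
                                             enable_weights_quantization=True)

    # After this change, NodeWeightsQuantizationConfig copies these four values from
    # weights_cfg, while simd_size still comes from op_cfg and the error method, bias
    # correction, etc. still come from the user-facing QuantizationConfig.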
@@ -50,7 +50,6 @@ class QuantizationConfig:
  weights_error_method: QuantizationErrorMethod = QuantizationErrorMethod.MSE,
  relu_bound_to_power_of_2: bool = False,
  weights_bias_correction: bool = True,
- weights_per_channel_threshold: bool = True,
  weights_second_moment_correction: bool = False,
  input_scaling: bool = False,
  softmax_shift: bool = False,
@@ -73,7 +72,6 @@
  relu_bound_to_power_of_2 (bool): Whether to use relu to power of 2 scaling correction or not.
  weights_bias_correction (bool): Whether to use weights bias correction or not.
  weights_second_moment_correction (bool): Whether to use weights second_moment correction or not.
- weights_per_channel_threshold (bool): Whether to quantize the weights per-channel or not (per-tensor).
  input_scaling (bool): Whether to use input scaling or not.
  softmax_shift (bool): Whether to use softmax shift or not.
  shift_negative_activation_correction (bool): Whether to use shifting negative activation correction or not.
@@ -90,11 +88,11 @@
  One may create a quantization configuration to quantize a model according to.
  For example, to quantize a model's weights and activation using thresholds, such that
  weights threshold selection is done using MSE, activation threshold selection is done using NOCLIPPING (min/max),
- enabling relu_bound_to_power_of_2, weights_bias_correction, and quantizing the weights per-channel,
+ enabling relu_bound_to_power_of_2, weights_bias_correction,
  one can instantiate a quantization configuration:

  >>> import model_compression_toolkit as mct
- >>> qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,weights_error_method=mct.core.QuantizationErrorMethod.MSE,relu_bound_to_power_of_2=True,weights_bias_correction=True,weights_per_channel_threshold=True)
+ >>> qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING, weights_error_method=mct.core.QuantizationErrorMethod.MSE, relu_bound_to_power_of_2=True, weights_bias_correction=True)


  The QuantizationConfig instanse can then be passed to
@@ -107,7 +105,6 @@
  self.relu_bound_to_power_of_2 = relu_bound_to_power_of_2
  self.weights_bias_correction = weights_bias_correction
  self.weights_second_moment_correction = weights_second_moment_correction
- self.weights_per_channel_threshold = weights_per_channel_threshold
  self.activation_channel_equalization = activation_channel_equalization
  self.input_scaling = input_scaling
  self.softmax_shift = softmax_shift
@@ -126,11 +123,6 @@


  # Default quantization configuration the library use.
- DEFAULTCONFIG = QuantizationConfig(QuantizationErrorMethod.MSE,
- QuantizationErrorMethod.MSE,
- relu_bound_to_power_of_2=False,
- weights_bias_correction=True,
- weights_second_moment_correction=False,
- weights_per_channel_threshold=True,
- input_scaling=False,
- softmax_shift=False)
+ DEFAULTCONFIG = QuantizationConfig(QuantizationErrorMethod.MSE, QuantizationErrorMethod.MSE,
+ relu_bound_to_power_of_2=False, weights_bias_correction=True,
+ weights_second_moment_correction=False, input_scaling=False, softmax_shift=False)
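Taken together, the quantization_config.py hunks remove weights_per_channel_threshold from QuantizationConfig; per-channel thresholding is now governed by the attribute configs on the target-platform side. A usage sketch mirroring the updated docstring example above (user code that still passes weights_per_channel_threshold would presumably need to drop that keyword):

    import model_compression_toolkit as mct

    # weights_per_channel_threshold is no longer an argument in this version;
    # the remaining knobs are unchanged.
    qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
                                     weights_error_method=mct.core.QuantizationErrorMethod.MSE,
                                     relu_bound_to_power_of_2=True,
                                     weights_bias_correction=True)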
@@ -17,7 +17,7 @@ from typing import Dict, Any, Tuple
  import numpy as np

  from model_compression_toolkit.logger import Logger
- from model_compression_toolkit.core.common.defaultdict import DefaultDict
+ from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.core.common.quantization.node_quantization_config import NodeWeightsQuantizationConfig

@@ -81,6 +81,7 @@ def set_quantization_configs_to_node(node: BaseNode,
  fw_info,
  weight_channel_axis,
  node_qc_options,
+ node.type,
  mixed_precision_enable=mixed_precision_enable)

  for candidate_qc in node.candidates_quantization_cfg:
@@ -118,10 +119,11 @@ def create_node_activation_qc(qc: QuantizationConfig,
  activation_quantization_params_fn)


- def create_node_qc_candidate(qc: QuantizationConfig,
- fw_info: FrameworkInfo,
- weight_channel_axis: int,
- op_cfg: OpQuantizationConfig) -> CandidateNodeQuantizationConfig:
+ def _create_node_single_candidate_qc(qc: QuantizationConfig,
+ fw_info: FrameworkInfo,
+ weight_channel_axis: int,
+ op_cfg: OpQuantizationConfig,
+ kernel_attr: str) -> CandidateNodeQuantizationConfig:
  """
  Create quantization configuration candidate from a QuantizationConfig object.
  Creates both weights and activation quantization configurations
@@ -133,18 +135,26 @@ def create_node_qc_candidate(qc: QuantizationConfig,
  weights/activations should be quantized)
  weight_channel_axis: Output channel index of the node's kernel.
  op_cfg: OpQuantizationConfig of the node with quantizers types to use when creating node quantization configuration.
+ kernel_attr: The name of the kernel attribute of the node,
+ TODO: kernel_attr should be removed once enabling attributes quantization (because this function would create
+ candidate for all attributes not specifically for the kernel

  Returns: a CandidateNodeQuantizationConfig object with both weights and activation quantization config objects.

  """

- # get attributes for weights quantization
- weights_quantization_fn = get_weights_quantization_fn(op_cfg.weights_quantization_method)
+ # get attributes for weights quantization.
+ # if the node doesn't have a specified kernel config we use the default attribute config for quantization.
+ # TODO: This should be the behavior for all attributes that are not specified in the attribute config mapping,
+ # which currently disables the quantization of the weights attribute.
+ weights_cfg = op_cfg.attr_weights_configs_mapping.get(kernel_attr, op_cfg.default_weight_attr_config)
+
+ weights_quantization_fn = get_weights_quantization_fn(weights_cfg.weights_quantization_method)

  if weights_quantization_fn is None:
- Logger.critical('Unknown quantization method for weights') # pragma: no cover
+ Logger.critical(f'Unknown quantization method for weights for quantizing attribute: {kernel_attr}') # pragma: no cover

- weights_quantization_params_fn = get_weights_quantization_params_fn(op_cfg.weights_quantization_method)
+ weights_quantization_params_fn = get_weights_quantization_params_fn(weights_cfg.weights_quantization_method)

  # get attributes for activation quantization
  activation_quantization_fn = fw_info.activation_quantizer_mapping.get(op_cfg.activation_quantization_method)
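The key line in this hunk is the lookup with a fallback: the candidate's weights settings come from attr_weights_configs_mapping when it has an entry for the node's kernel attribute, and from default_weight_attr_config otherwise. A small, self-contained sketch of that dict.get pattern; the stub class below is illustrative and not the real OpQuantizationConfig:

    # Stand-in for op_cfg, keeping only the two fields the lookup uses.
    class _OpCfgStub:
        def __init__(self, attr_weights_configs_mapping, default_weight_attr_config):
            self.attr_weights_configs_mapping = attr_weights_configs_mapping
            self.default_weight_attr_config = default_weight_attr_config

    op_cfg = _OpCfgStub(attr_weights_configs_mapping={'kernel': 'kernel-specific config'},
                        default_weight_attr_config='default attribute config')

    # Same pattern as the hunk: per-attribute config if one exists, default otherwise.
    print(op_cfg.attr_weights_configs_mapping.get('kernel', op_cfg.default_weight_attr_config))  # kernel-specific config
    print(op_cfg.attr_weights_configs_mapping.get('bias', op_cfg.default_weight_attr_config))    # default attribute config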
@@ -159,13 +169,15 @@
  activation_quantization_params_fn=activation_quantization_params_fn,
  weights_quantization_fn=weights_quantization_fn,
  weights_quantization_params_fn=weights_quantization_params_fn,
- weight_channel_axis=weight_channel_axis)
+ weight_channel_axis=weight_channel_axis,
+ weights_cfg=weights_cfg)


  def _create_node_candidates_qc(qc: QuantizationConfig,
  fw_info: FrameworkInfo,
  weight_channel_axis: int,
  node_qc_options: QuantizationConfigOptions,
+ node_type: type,
  mixed_precision_enable: bool = False) -> List[CandidateNodeQuantizationConfig]:
  """
  Create a list of candidates of weights and activation quantization configurations for a node.
@@ -175,6 +187,7 @@ def _create_node_candidates_qc(qc: QuantizationConfig,
  fw_info: Framework information (e.g., which layers should have their kernels' quantized).
  weight_channel_axis: Output channel index of the node's kernel.
  node_qc_options: QuantizationConfigOptions for the node with quantization candidates information.
+ node_type: The type of the layer that the node represents.
  mixed_precision_enable: is mixed precision enabled

  Returns:
@@ -182,21 +195,31 @@
  """

  candidates = []
+
+ # TODO: Currently, we are using fw_info to get the kernel attribute, but this would changed once we enable multi
+ # attribute quantization via AttributeQuantizationConfig class (needs to be implemented)
+
+ kernel_attr = fw_info.get_kernel_op_attributes(node_type)
+ assert len(kernel_attr) == 1
+ kernel_attr = kernel_attr[0]
+
  if mixed_precision_enable:
  for op_cfg in node_qc_options.quantization_config_list:
  candidate_nbits_qc = copy.deepcopy(qc)
- candidates.append(create_node_qc_candidate(candidate_nbits_qc,
- fw_info,
- weight_channel_axis,
- op_cfg))
+ candidates.append(_create_node_single_candidate_qc(candidate_nbits_qc,
+ fw_info,
+ weight_channel_axis,
+ op_cfg,
+ kernel_attr))
  # sorting the candidates by weights number of bits first and then by activation number of bits
  # (in reversed order)
  candidates.sort(key=lambda c: (c.weights_quantization_cfg.weights_n_bits,
  c.activation_quantization_cfg.activation_n_bits), reverse=True)
  else:
- candidates.append(create_node_qc_candidate(qc,
- fw_info,
- weight_channel_axis,
- node_qc_options.base_config))
+ candidates.append(_create_node_single_candidate_qc(qc,
+ fw_info,
+ weight_channel_axis,
+ node_qc_options.base_config,
+ kernel_attr))

  return candidates
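Two details of this hunk are easy to miss: the kernel attribute name is resolved once per node via fw_info.get_kernel_op_attributes(node_type), which is expected to return exactly one entry (hence the assert), and the mixed-precision branch still sorts candidates by (weights_n_bits, activation_n_bits) in descending order. A toy illustration of that sort key, using plain tuples instead of CandidateNodeQuantizationConfig objects:

    # (weights_n_bits, activation_n_bits) pairs standing in for candidate configs.
    candidates = [(2, 8), (8, 8), (4, 8), (8, 4)]
    candidates.sort(key=lambda c: (c[0], c[1]), reverse=True)
    print(candidates)  # [(8, 8), (8, 4), (4, 8), (2, 8)] - highest bit-widths first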
@@ -20,7 +20,7 @@ import scipy

  from model_compression_toolkit.core import common
  from model_compression_toolkit.core.common import Graph, BaseNode
- from model_compression_toolkit.core.common.defaultdict import DefaultDict
+ from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig

@@ -24,7 +24,7 @@ if version.parse(tf.__version__) >= version.parse("2.13"):
  else:
  from keras.layers import Conv2D, DepthwiseConv2D, Dense, Conv2DTranspose, Softmax, ELU

- from model_compression_toolkit.core.common.defaultdict import DefaultDict
+ from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
  from model_compression_toolkit.constants import SOFTMAX_THRESHOLD
@@ -17,8 +17,8 @@ from torch.nn.functional import hardsigmoid, relu, relu6, softmax
  from torch.nn import Conv2d, ConvTranspose2d, Linear
  from torch import sigmoid

- from model_compression_toolkit.core.common.defaultdict import DefaultDict
- from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
+ from model_compression_toolkit.defaultdict import DefaultDict
+ from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
  from model_compression_toolkit.constants import SOFTMAX_THRESHOLD
  from model_compression_toolkit.core.pytorch.constants import KERNEL