mct-nightly 2.0.0.20240418.439__py3-none-any.whl → 2.0.0.20240419.358__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/METADATA +1 -1
- {mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/RECORD +39 -39
- model_compression_toolkit/__init__.py +1 -1
- model_compression_toolkit/core/common/graph/base_graph.py +2 -2
- model_compression_toolkit/core/common/graph/base_node.py +25 -8
- model_compression_toolkit/core/common/graph/functional_node.py +18 -1
- model_compression_toolkit/core/common/network_editors/node_filters.py +4 -3
- model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -5
- model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +4 -3
- model_compression_toolkit/core/common/similarity_analyzer.py +2 -2
- model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +4 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +7 -7
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +1 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +1 -1
- model_compression_toolkit/core/keras/keras_implementation.py +10 -10
- model_compression_toolkit/core/keras/keras_node_prior_info.py +4 -4
- model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +4 -5
- model_compression_toolkit/core/keras/reader/common.py +2 -2
- model_compression_toolkit/core/keras/reader/node_builder.py +28 -9
- model_compression_toolkit/core/keras/tf_tensor_numpy.py +5 -2
- model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +34 -21
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +8 -8
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +2 -2
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +4 -4
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +1 -1
- model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +8 -8
- model_compression_toolkit/core/pytorch/pytorch_implementation.py +4 -5
- model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +2 -2
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +5 -1
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +9 -2
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +20 -6
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +22 -8
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +1 -1
- {mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/LICENSE.md +0 -0
- {mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/WHEEL +0 -0
- {mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/top_level.txt +0 -0
{mct_nightly-2.0.0.20240418.439.dist-info → mct_nightly-2.0.0.20240419.358.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-model_compression_toolkit/__init__.py,sha256=
+model_compression_toolkit/__init__.py,sha256=Z7k-QRaYZPcLBEUjLrvduEEioZo1wiTlhZ9VQyP7PFk,1573
 model_compression_toolkit/constants.py,sha256=yIJyJ-e1WrDeKD9kG15qkqfYnoj7J1J2CxnJDt008ik,3756
 model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
 model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567
@@ -17,7 +17,7 @@ model_compression_toolkit/core/common/model_builder_mode.py,sha256=jll9-59OPaE3u
 model_compression_toolkit/core/common/model_collector.py,sha256=ofcepKtxc3j2Ouz6BpAKXTzPgjABnpRP47ndmJCXAkk,8352
 model_compression_toolkit/core/common/model_validation.py,sha256=LaG8wd6aZl0OJgieE3SeiVDEPxtk8IHq9-3wSnmWhY4,1214
 model_compression_toolkit/core/common/node_prior_info.py,sha256=WXX_PrGVG9M9I_REG5ZzFBohwmV4yf356sZnrja_FLo,2832
-model_compression_toolkit/core/common/similarity_analyzer.py,sha256=
+model_compression_toolkit/core/common/similarity_analyzer.py,sha256=FikcIqgQQpfiXr9VJvgl-wk8OyH7-LvC8ku7TkhJfJM,9200
 model_compression_toolkit/core/common/user_info.py,sha256=dSRMnT-oewmdOziIpEuW-s9K7vTSeyUBxT4z9neXurI,1648
 model_compression_toolkit/core/common/back2framework/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/core/common/back2framework/base_model_builder.py,sha256=V1oShKzbSkdcTvREn8VnQQBzvm-tTHkWMXqMkYozF2s,2023
@@ -30,10 +30,10 @@ model_compression_toolkit/core/common/collectors/statistics_collector.py,sha256=
 model_compression_toolkit/core/common/fusion/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
 model_compression_toolkit/core/common/fusion/layer_fusing.py,sha256=lOubqpc18TslhXZijWUJQAa1c3jIB2S-M-5HK78wJPQ,5548
 model_compression_toolkit/core/common/graph/__init__.py,sha256=Xr-Lt_qXMdrCnnOaUS_OJP_3iTTGfPCLf8_vSrQgCs0,773
-model_compression_toolkit/core/common/graph/base_graph.py,sha256=
-model_compression_toolkit/core/common/graph/base_node.py,sha256=
+model_compression_toolkit/core/common/graph/base_graph.py,sha256=VKw50YwgGfyA7sdtEeU9qaSDnN31p4fHX1JVx7wNF9c,38265
+model_compression_toolkit/core/common/graph/base_node.py,sha256=IaHrvgtUZvPshhUjS4j2lqIP22Mlny82BQZaL2iVJew,29079
 model_compression_toolkit/core/common/graph/edge.py,sha256=buoSEUZwilWBK3WeBKpJ-GeDaUA1SDdOHxDpxU_bGpk,3784
-model_compression_toolkit/core/common/graph/functional_node.py,sha256=
+model_compression_toolkit/core/common/graph/functional_node.py,sha256=71_4TrCdqR_r0mtgxmAyqI05iP5YoQQGeSmDgynuzTw,3902
 model_compression_toolkit/core/common/graph/graph_matchers.py,sha256=CrDoHYq4iPaflgJWmoJ1K4ziLrRogJvFTVWg8P0UcDU,4744
 model_compression_toolkit/core/common/graph/graph_searches.py,sha256=2oKuW6L8hP-oL0lFO9PhQFt9fEFgVJwpc1u4fHExAtE,5128
 model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py,sha256=3el-A7j1oyoo1_9zq3faQp7IeRsFXFCvnrb3zZFXpU0,9803
@@ -78,7 +78,7 @@ model_compression_toolkit/core/common/mixed_precision/search_methods/linear_prog
 model_compression_toolkit/core/common/network_editors/__init__.py,sha256=vZmu55bYqiaOQs3AjfwWDXHmuKZcLHt-wm7uR5fPEqg,1307
 model_compression_toolkit/core/common/network_editors/actions.py,sha256=nid0_j-Cn10xvmztT8yCKW_6uA7JEnom9SW9syx7wc0,19594
 model_compression_toolkit/core/common/network_editors/edit_network.py,sha256=dfgawi-nB0ocAJ0xcGn9E-Zv203oUnQLuMiXpX8vTgA,1748
-model_compression_toolkit/core/common/network_editors/node_filters.py,sha256=
+model_compression_toolkit/core/common/network_editors/node_filters.py,sha256=Pc_MCohCIbibIKI8Sz8RuQjEAHn-vRZMpuWCCliMqFk,3236
 model_compression_toolkit/core/common/pruning/__init__.py,sha256=DGJybkDQtKMSMFoZ-nZ3ZifA8uJ6G_D20wHhKHNlmU0,699
 model_compression_toolkit/core/common/pruning/channels_grouping.py,sha256=4jsr1xEBNpV2c4ipi366IfHoHCJVqoRUTTOJdlRomvc,3892
 model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py,sha256=cKcSkrQkTFm6Ns7Uq9IXWEMDPQfr9Ew9RvZXmr4p4cM,7928
@@ -101,7 +101,7 @@ model_compression_toolkit/core/common/quantization/candidate_node_quantization_c
 model_compression_toolkit/core/common/quantization/core_config.py,sha256=KYdyfSmjSL4ye24nKlC_c4_AxYb14qoqaeMnZj4-8kE,2257
 model_compression_toolkit/core/common/quantization/debug_config.py,sha256=HtkMmneN-EmAzgZK4Vp4M8Sqm5QKdrvNyyZMpaVqYzY,1482
 model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py,sha256=fwF4VILaX-u3ZaFd81xjbJuhg8Ef-JX_KfMXW0TPV-I,7136
-model_compression_toolkit/core/common/quantization/node_quantization_config.py,sha256=
+model_compression_toolkit/core/common/quantization/node_quantization_config.py,sha256=q5e1XRIZc2APtdHRW_8bucoFqxaoc0srKWMUgYJ6dAM,26723
 model_compression_toolkit/core/common/quantization/quantization_config.py,sha256=Y76BZ-X2vE_PXeM9r7D93VsFnbC_evoHhN7zYuvFdzw,7041
 model_compression_toolkit/core/common/quantization/quantization_fn_selection.py,sha256=T1nVWdRJfBQ_iuMQYQSIkjfkR-2n3lAOKGAz_rUZZN0,2190
 model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py,sha256=MwIOBZ4BlZSTIOG75PDvlI3JmZ6t8YjPc1VP9Adei60,3847
@@ -110,7 +110,7 @@ model_compression_toolkit/core/common/quantization/quantize_node.py,sha256=cdzGN
 model_compression_toolkit/core/common/quantization/set_node_quantization_config.py,sha256=O4qFJw3nBYUD4cGbO8haGXZ2-piSqoRpDKDD74iXSxw,12417
 model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py,sha256=eCDGwsWYLU6z7qbEVb4TozMW_nd5VEP_iCJ6PcvyEPw,1486
 model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py,sha256=4x6rgQ5bCz2kysVkjBXxbb2dNEC9N1S2TE46kOFXU_c,23305
-model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py,sha256=
+model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py,sha256=FWyOcjENAK-bFPpVjgczDiGAWZi--OgJ60jZjPUPqzo,8059
 model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py,sha256=9gnfJV89jpGwAx8ImJ5E9NjCv3lDtbyulP4OtgWb62M,1772
 model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py,sha256=ejc_obamUndJsv3F1FuOGMrIibS__qDUbAia1H9vwUM,9487
 model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py,sha256=noEdvGiyyW7acgQ2OFWLedCODibTGYJifC9qo8YIU5U,4558
@@ -150,33 +150,33 @@ model_compression_toolkit/core/keras/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7V
 model_compression_toolkit/core/keras/constants.py,sha256=Uv3c0UdW55pIVQNW_1HQlgl-dHXREkltOLyzp8G1mTQ,3163
 model_compression_toolkit/core/keras/custom_layer_validation.py,sha256=f-b14wuiIgitBe7d0MmofYhDCTO3IhwJgwrh-Hq_t_U,1192
 model_compression_toolkit/core/keras/default_framework_info.py,sha256=Ha4HTHuiw_KTS5Po1Xnv6GyK9eprpDhYWf-eooS62Ys,4961
-model_compression_toolkit/core/keras/keras_implementation.py,sha256=
+model_compression_toolkit/core/keras/keras_implementation.py,sha256=04vZfpSzDq8jwUlsT88FXp4ngTnTIxKVdIxiYWAwmbg,29673
 model_compression_toolkit/core/keras/keras_model_validation.py,sha256=1wNV2clFdC9BzIELRLSO2uKf0xqjLqlkTJudwtCeaJk,1722
-model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=
+model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=TIqazcbeoMxZgneQ-i24myCPyBpy_RJ7GcDxxWHbb4U,3907
 model_compression_toolkit/core/keras/resource_utilization_data_facade.py,sha256=Xmk2ZL5CaYdb7iG62HdtZ1F64vap7ffnrsuR3e3G5hc,4851
-model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=
+model_compression_toolkit/core/keras/tf_tensor_numpy.py,sha256=moMFP7hOagAbd09bWa2AX5uxN7XOJqLB-mV3mA30kLQ,2651
 model_compression_toolkit/core/keras/back2framework/__init__.py,sha256=rhIiXg_nBgUZ-baE3M6SzCuQbcnq4iebY1jtJBvKHOM,808
 model_compression_toolkit/core/keras/back2framework/factory_model_builder.py,sha256=urpfyHvIzD08QzPBWusVBT_dKZ8ZUf1I1zIQNb4qe5Y,2233
 model_compression_toolkit/core/keras/back2framework/float_model_builder.py,sha256=9SFHhX-JnkB8PvYIIHRYlReBDI_RkZY9LditzW_ElLk,2444
 model_compression_toolkit/core/keras/back2framework/instance_builder.py,sha256=fBj13c6zkVoWX4JJG18_uXPptiEJqXClE_zFbaFB6Q8,4517
-model_compression_toolkit/core/keras/back2framework/keras_model_builder.py,sha256=
+model_compression_toolkit/core/keras/back2framework/keras_model_builder.py,sha256=7xEasszjyqbkv5UJuBfEr2kxRPtcrdNP26ivELoIVI4,16227
 model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py,sha256=psJzQJ_Hv_wyHuXZotN5dopwoKIu87-xnaILfINWXf0,15567
 model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py,sha256=5wFb4nx_F0Wu4c8pLf6n6OzxOHtpOJ6_3mQsNSXIudU,2481
 model_compression_toolkit/core/keras/graph_substitutions/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py,sha256=3FVqVfEeEve8vRejmMex079HZDnoRsF21kNY53ZjIM8,5140
-model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py,sha256=
+model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py,sha256=Oe2QNsU49rp6qW1jUuOG_fKrE3DqXXDmVWN0fiyWo4c,8207
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py,sha256=GR1a3mCZpNUu4WxixJXF_aSm57phAdxaRoHecNx3hxw,3168
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py,sha256=5df_xGfXkqNub4xVRnCWQvSohWqdv12axjJ6edVU2H0,2478
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py,sha256=Hl4LEQ_bw_Vpmf3ZqHujYUqVdvTNsPlEMvr9dZhwg2U,2806
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py,sha256=R3U7cjc2E0zheMem16GHygp5jZFGSaomkNOTxTjcAgw,5794
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py,sha256=V6hp67CkS_A3WqdsjLjs0ETtdZAOo4P9mhy4aT7W5FE,5940
-model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py,sha256=
+model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py,sha256=CjBbw20BcD9NFBftc9d28TYLmBI9wUzjfZ33c22hgHU,8157
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py,sha256=kjwlKtm5yhNgWVVcW6mN-hn7enwAnn_8-TUZvxZBiQs,4112
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py,sha256=l9PUREBf4aRwWILiybdteveeUbh7js-i-hLt8Ma0e4c,26771
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py,sha256=IdKOg6AWZWMcmDbOuNdxetS5_zTarXIIffdYL7JTdvk,3872
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py,sha256=z2J2Xk7b_w_fEgJmK87lwwBmEoAZpGxPmsBrR24IkZs,2035
-model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py,sha256=
+model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py,sha256=MdRl2CLF_EsjHcpqa_kzgD7gKZI1kM4E1qDLarCH9Cw,3188
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py,sha256=ryes9y1ie-vjBGso2TeO4EXxVk69Ew3iSAhshPz1Ou4,5542
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py,sha256=TEaHlIbXj_ZjIdT5TmAICD3WLD3u_7g0fLWQcNzTJuM,7941
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py,sha256=JCK--hQMKzbx4MOQZBPZqK015JWZELUO5YdA30IU4bI,11149
@@ -191,15 +191,15 @@ model_compression_toolkit/core/keras/mixed_precision/__init__.py,sha256=sw7LOPN1
 model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py,sha256=aW8wR13fK6P6xzbU9XGU60IO1yYzXSo_Hk4qeq486kg,5137
 model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py,sha256=Ziydik2j-LvNBXP3TSfUD6rEezPAikzQGib0_IXkmGM,6729
 model_compression_toolkit/core/keras/pruning/__init__.py,sha256=3Lkr37Exk9u8811hw8hVqkGcbTQGcLjd3LLuLC3fa_E,698
-model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py,sha256=
+model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py,sha256=EJkblZ4OAjI5l29GKsUraam5Jn58Sogld47_rFFyr3k,12777
 model_compression_toolkit/core/keras/quantizer/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
 model_compression_toolkit/core/keras/quantizer/base_quantizer.py,sha256=eMRjAUU189-AVwNGMlV0M-ZlL48ZYmILzutheUT00xU,1628
 model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py,sha256=ymBSCN7IeJnS305fpVO866WJxW456yV8cZAD4kMD308,6274
 model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py,sha256=Up3-sbuAcaJ6kfe7Sz3XN6iiJ9hlxzOMncLCFEXJFjk,4475
 model_compression_toolkit/core/keras/reader/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
-model_compression_toolkit/core/keras/reader/common.py,sha256=
+model_compression_toolkit/core/keras/reader/common.py,sha256=lVy_dRfIfx4mrijdh8HO9HAx4Jq7QyLY9VGB6dxwDx8,2518
 model_compression_toolkit/core/keras/reader/connectivity_handler.py,sha256=AgF6qXZOJMeXvc-pBnGY23BJz7wPBx2aTYxHiO8efec,11303
-model_compression_toolkit/core/keras/reader/node_builder.py,sha256=
+model_compression_toolkit/core/keras/reader/node_builder.py,sha256=URmE3lM9CskS-9a3TuqfReLdHh36Dti08RL8qxzrBjc,10471
 model_compression_toolkit/core/keras/reader/reader.py,sha256=wS9UQ2wJKnkZYe9JHwQp7ygDr6CRlzrxmIyLDv1Qz6U,8109
 model_compression_toolkit/core/keras/reader/nested_model/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
 model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py,sha256=K6KAH9o8KSG6baLmhKoCrYK-i-wb6gRKiZmoijFqEYA,7906
@@ -213,8 +213,8 @@ model_compression_toolkit/core/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKW
 model_compression_toolkit/core/pytorch/constants.py,sha256=NI-J7REuxn06oEIHsmJ4GqtNC3TbV8xlkJjt5Ar-c4U,2626
 model_compression_toolkit/core/pytorch/default_framework_info.py,sha256=r1XyzUFvrjGcJHQM5ETLsMZIG2yHCr9HMjqf0ti9inw,4175
 model_compression_toolkit/core/pytorch/pytorch_device_config.py,sha256=IoMvTch5awAEPvB6Tg6ANhFGXvfSgv7JLsUBlxpMwk4,4330
-model_compression_toolkit/core/pytorch/pytorch_implementation.py,sha256=
-model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py,sha256=
+model_compression_toolkit/core/pytorch/pytorch_implementation.py,sha256=sEtlxpWdt0rzuTN3R0bNCC_l75Xy7rIBMUWY7LuhYKI,27351
+model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py,sha256=2LDQ7qupglHQ7o1Am7LWdfYVacfQnl-aW2N6l9det1w,3264
 model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py,sha256=E6ifk1HdO60k4IRH2EFBzAYWtwUlrGqJoQ66nknpHoQ,4983
 model_compression_toolkit/core/pytorch/utils.py,sha256=dRPiteBg2dBNsHwZyYzXiCIAjnelSoeZZsDXlsTw5JQ,2880
 model_compression_toolkit/core/pytorch/back2framework/__init__.py,sha256=H_WixgN0elVWf3exgGYsi58imPoYDj5eYPeh6x4yfug,813
@@ -222,27 +222,27 @@ model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py,s
 model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py,sha256=tLrlUyYhxVKVjkad1ZAtbRra0HedB3iVfIkZ_dYnQ-4,3419
 model_compression_toolkit/core/pytorch/back2framework/instance_builder.py,sha256=BxX0e6kKoV29UNT-Yee28f7rXID5_KBz1RiSDYo2Vjk,1848
 model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py,sha256=D7lU1r9Uq_7fdNuKk2BMF8ho5GrsY-8gyGN6yYoHaVg,15060
-model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py,sha256=
+model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py,sha256=i0U4EQfaTbbBHMrv9oJHJ5ltybfGTGz17KETnNpci3k,18299
 model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py,sha256=qZNNOlNTTV4ZKPG3q5GDXkIVTPUEr8dvxAS_YiMORmg,3456
 model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py,sha256=q2JDw10NKng50ee2i9faGzWZ-IydnR2aOMGSn9RoZmc,5773
 model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py,sha256=F2hH2nbFQHtuS8CcG2GmNYfJ9gdrpHccnijHsX_CYgM,1640
 model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
-model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py,sha256=
+model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py,sha256=tWlGdjQxkcIokoIIhYhzAFniyJWtw6bVlSjxAFjZyww,8360
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py,sha256=B7aC2TZNrQJ2oQVGBFhKAVqdUU5lYVJSMmwKhjxOHWk,2822
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py,sha256=JDWOaNwYrZG0zTwd3HwoZUM3tKu7zPbzLOrqNQsu8xA,2162
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py,sha256=SBrR24ZAnWPftLinv4FuIqdBGjfYtfXbYQJN5mgy5V4,2861
-model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py,sha256=
+model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py,sha256=Fs2YQBD4KJV-pGLOMqm-p485bfq2JDYgCzFroRljCoM,3933
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py,sha256=iX8bLHtw2osP42-peNLTRmbpX3cUxdGsAbEfw7NLpx0,3935
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py,sha256=zKSgtVw_P9fUvdq4e7P9yaLDPG_vZ0cecM9sVPtm1ns,3799
-model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py,sha256=
+model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py,sha256=8KtuyqKgu7VTk6zffTmIteZ5Eq90ffPXgYWqca0NZj0,5829
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py,sha256=VNg-VgzCxSyqy2J3neEPl6U0SPO8UIVU_T47bGhz4FE,38459
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py,sha256=EMCviyFyJFLEKuAUz3rZHLfB9MAU1kywSBL2XQNzLlg,1953
-model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py,sha256=
+model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py,sha256=q1a3HieQtaOmWG2WGXp6GHYAvxa3CZ9dJUx9dqMAsS8,5695
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py,sha256=joHjwiUxccypMHkTy46rI91VyapLn9yJ2YRo5ISnOH4,1987
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py,sha256=jOqlelGhADEZiYUEyYj9oJZ5YLXx8jWNUlVTG6Td79Y,4919
-model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py,sha256=
+model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py,sha256=flHJU-hSvih3bi8c24SYArqN00UnOVImu8HZBZAzzVo,2909
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py,sha256=XFtU9yuBmoZlX0f0mS6otMPWMk-RcWs94XdvvTNhW8Y,3303
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py,sha256=lOPl5zDU3FoR9WmlxO04Pfi65MimK0gmnuHzQJodQdY,10668
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py,sha256=05lV4pIL3hJkZl4JQPV4wk_EFD0eYLG5b8cdzvZk4P8,1588
@@ -256,7 +256,7 @@ model_compression_toolkit/core/pytorch/mixed_precision/__init__.py,sha256=Rf1RcY
 model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py,sha256=-6oep2WJ85-JmIxZa-e2AmBpbORoKe4Xdduz2ZidwvM,4871
 model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py,sha256=KVZTKCYzJqqzF5nFEiuGMv_sNeVuBTxhmxWMFacKOxE,6337
 model_compression_toolkit/core/pytorch/pruning/__init__.py,sha256=RAe8mgIr1V8dRIQtLf_dSG5zTUCKuQzxyybYx1dzEAs,697
-model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py,sha256=
+model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py,sha256=VfEEVwWEXKpVlZFnr7N6mvEjcpq85ROLg05ZvXfD1Pg,14764
 model_compression_toolkit/core/pytorch/quantizer/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
 model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py,sha256=rox-f5wbRyxU1UHeHyaoIDXB9r9fCXm1dPN4FVwHqTc,6464
 model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py,sha256=uyeBtNokyDUikk-YkDP_mN_2DX0J5oPm3kSfdSUT2Ck,4420
@@ -324,12 +324,12 @@ model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quant
 model_compression_toolkit/exporter/model_wrapper/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py,sha256=YffgbVYJG5LKeIsW84Pi7NqzQcvJMeQRnAKQCCmIL6c,3776
 model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py,sha256=T2wgd7b86cpA5Ffq5eVCb8YlmnJ7vDxtmFeRkZtpLZc,5422
 model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py,sha256=uL6tJWC4s2IWUy8GJVwtMWpwZZioRRztfKyPJHo14xI,9442
 model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
 model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py,sha256=uTQcnzvP44CgPO0twsUdiMmTBE_Td6ZdQtz5U0GZuPI,3464
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
-model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=
+model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py,sha256=YT9IVdpKaJbAW3msYRoQNIgqRSEVwSarRy6qlWCrBfk,5389
 model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py,sha256=4sN5z-6BXrTE5Dp2FX_jKO9ty5iZ2r4RM7XvXtDVLSI,9348
 model_compression_toolkit/gptq/__init__.py,sha256=YKg-tMj9D4Yd0xW9VRD5EN1J5JrmlRbNEF2fOSgodqA,1228
 model_compression_toolkit/gptq/runner.py,sha256=PQoLK3WhdRuUwZMd1VbtA7KZ9c-zWig_0ShmTtvJSHY,5970
@@ -438,7 +438,7 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py,sha256=bPBWxopMUHFgiaJjaAfoompwShvfH2wHAouN56PQn0A,6484
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py,sha256=iCBfBmIRozoeGVPC3MjZpVyp-Nx4fC94_PKILC82K-Y,5731
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py,sha256=vFDyiMymNZSRCdTgAyWn4A-tZD3vzze_PTLBSF2OYe8,721
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py,sha256=
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py,sha256=InFx2Uh6dG30U9YIvVH_1jb34r1wJw7ms3J295lRKvM,10619
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py,sha256=bU74t-ZIkIptXuNaPI_YIC5w9TX6nDgJUpJwxHAPOSI,6493
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py,sha256=09fbd5vEnSQDWfCkMRtYZYy7kIYiWkXDcH_dT1cAmoY,5739
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py,sha256=NUuczImqUxzdfflqSdqkeAN8aCU6Tuiu6U0Fnj9Tzmw,721
@@ -446,12 +446,12 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py,sha256=NkAGCZbSgXYeRAiJRzt19h2cxkrVQJaHu8-2jHZLOYg,6505
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py,sha256=X853xDEF-3rcPoqxbrlYN28vvW3buSdM36c_eN_LKx8,5758
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py,sha256=vKWAoQ2KkhuptS5HZB50zHG6KY8wHpHTxPugw_nGCRo,717
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py,sha256=
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py,sha256=
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py,sha256=9LWG0GjFdtxdCbng8P6lCCqCKt8ou0Jb3VuH62a2HUQ,12037
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py,sha256=U5lYwk6vJkRt5fo5v_1_h5POTwf9zfia1XQ_cDoOZAI,6587
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py,sha256=jAyTXhcChO124odtWC3bYKRH4ZyqLPkKQluJFOoyPIM,5726
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py,sha256=wUk4Xsg7jpxOWYjq2K3WUwLcI185p_sVPK-ttG0ydhA,721
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py,sha256=
-model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py,sha256=
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py,sha256=GBKLH6RfwSVIyvtFZ83BkbJOYu4MBcg5-n5_9MsE9TU,11770
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py,sha256=6PVKQKGpJpM2B1qvmf6fID_-MACaSQZkaL_9J_fj2SQ,6595
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py,sha256=dFQjzFlLDwoUqKNP1at1fS1N1WJadSSasRyzHl6vaB8,5733
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py,sha256=lnhJcwvTF0t7ybeiTleIS1p0aD8xzFZxVPx4ISk5uWQ,2090
@@ -483,8 +483,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=7bbzqJN8ZAycVDvZr_5xC-niTAR5df8f03Kooev_pfg,3047
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
-mct_nightly-2.0.0.
+mct_nightly-2.0.0.20240419.358.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-2.0.0.20240419.358.dist-info/METADATA,sha256=uV2pmWXST6Ex8kXxTv99ak6eArTAmCbkX4zXWfJpVbY,18795
+mct_nightly-2.0.0.20240419.358.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+mct_nightly-2.0.0.20240419.358.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-2.0.0.20240419.358.dist-info/RECORD,,
model_compression_toolkit/__init__.py CHANGED
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.0.0.
+__version__ = "2.0.0.20240419.000358"
model_compression_toolkit/core/common/graph/base_graph.py CHANGED
@@ -98,8 +98,8 @@ class Graph(nx.MultiDiGraph, GraphSearches):
         tpc_layers = tpc.op_sets_to_layers.get_layers()
         tpc_filtered_layers = [layer for layer in tpc_layers if isinstance(layer, LayerFilterParams)]
         for n in self.nodes:
-            is_node_in_tpc = n.
-
+            is_node_in_tpc = any([n.is_match_type(_type) for _type in tpc_layers]) or \
+                             any([n.is_match_filter_params(filtered_layer) for filtered_layer in tpc_filtered_layers])
             if n.is_custom:
                 if not is_node_in_tpc:
                     Logger.critical(f'MCT does not support optimizing Keras custom layers. Found a layer of type {n.type}. '
model_compression_toolkit/core/common/graph/base_node.py CHANGED
@@ -151,7 +151,21 @@ class BaseNode:
         """
         return self.reuse or self.reuse_group is not None
 
-    def
+    def _get_weight_name(self, name: Union[str, int]) -> List[Union[str, int]]:
+        """
+        Get weight names that match argument name (either string weights or integer for
+        positional weights).
+        Args:
+            name: weight name
+
+        Returns:
+            A list of weight names that match input "name"
+
+        """
+        return [k for k in self.weights.keys()
+                if (isinstance(k, int) and name == k) or (isinstance(k, str) and name in k)]
+
+    def get_weights_by_keys(self, name: Union[str, int]) -> np.ndarray:
         """
         Get a node's weight by its name.
         Args:
@@ -163,7 +177,7 @@ class BaseNode:
         if name is None:
             return None
 
-        res =
+        res = self._get_weight_name(name)
         if len(res) == 1:  # Make sure there are no duplicates
             return self.weights[res[0]]
         else:
@@ -179,7 +193,7 @@ class BaseNode:
 
         """
 
-        res =
+        res = self._get_weight_name(name)
         if len(res) == 1:
             self.weights[res[0]] = tensor
         else:  # Add if not exist
@@ -552,14 +566,17 @@ class BaseNode:
         for fl, qco in tpc.filterlayer2qco.items():
             if self.is_match_filter_params(fl):
                 return qco
-
-
+        # Extract qco with is_match_type to overcome mismatch of function types in TF 2.15
+        matching_qcos = [_qco for _type, _qco in tpc.layer2qco.items() if self.is_match_type(_type)]
+        if matching_qcos:
+            if len(matching_qcos) > 1:
+                Logger.error('Found duplicate qco types!')
+            return matching_qcos[0]
         return tpc.tp_model.default_qco
 
     def is_match_type(self, _type: Type) -> bool:
         """
-        Check if input type matches the node type, either in instance type or in type name.
-        name string is required because of function types changes that occurred in TF 2.15.
+        Check if input type matches the node type, either in instance type or in type name.
 
         Args:
             _type: other node type
@@ -567,7 +584,7 @@ class BaseNode:
             Whether _type matches the self node type
 
         """
-        return _type == self.type
+        return _type == self.type
 
     def is_match_filter_params(self, layer_filter_params: LayerFilterParams) -> bool:
         """
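The new `_get_weight_name` helper centralizes the lookup that the getter and setter shown above now both delegate to. A minimal, self-contained sketch of that matching rule (the weight keys and shapes below are hypothetical, not taken from MCT):

```python
from typing import Dict, List, Union
import numpy as np

# Hypothetical weights dictionary: string keys for named weights, int keys for positional weights.
weights: Dict[Union[str, int], np.ndarray] = {
    'conv2d/kernel:0': np.zeros((3, 3, 8, 16)),
    1: np.ones((16,)),
}

def get_weight_names(name: Union[str, int]) -> List[Union[str, int]]:
    # Same rule as BaseNode._get_weight_name in the hunk above:
    # exact match for positional (int) keys, substring match for string keys.
    return [k for k in weights
            if (isinstance(k, int) and name == k) or (isinstance(k, str) and name in k)]

assert get_weight_names('kernel') == ['conv2d/kernel:0']
assert get_weight_names(1) == [1]
```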
model_compression_toolkit/core/common/graph/functional_node.py CHANGED
@@ -1,5 +1,6 @@
-from typing import Dict, Any, Tuple,
+from typing import Dict, Any, Tuple, Type
 
+from model_compression_toolkit.constants import FOUND_TF
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
 import numpy as np
 
@@ -71,3 +72,19 @@ class FunctionalNode(BaseNode):
         :return: the node's functional_op
         """
         return self.functional_op
+
+    def is_match_type(self, _type: Type) -> bool:
+        """
+        Check if input type matches the node type, either in instance type or in type name. Checking the
+        name string is required because of function types changes that occurred in TF 2.15, because it
+        changes the "function" attribute object (e.g. a different tf.add function that will fail the
+        equal operation).
+
+        Args:
+            _type: other node type
+        Returns:
+            Whether _type matches the self node type
+
+        """
+        names_match = _type.__name__ == self.type.__name__ if FOUND_TF else False
+        return super().is_match_type(_type) or names_match
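The TF 2.15 problem described in the docstring is one of object identity: the function recorded when the model was read and the function compared against later can be different objects that denote the same op. A toy illustration of why the `__name__` fallback helps (plain Python, not MCT or TensorFlow code):

```python
def make_add():
    def add(x, y):
        return x + y
    return add

add_recorded, add_current = make_add(), make_add()

# Distinct function objects, as tf.add can be across TF versions:
assert add_recorded != add_current
# But the name still identifies the operator, which is what
# FunctionalNode.is_match_type now falls back to when TensorFlow is installed (FOUND_TF):
assert add_recorded.__name__ == add_current.__name__
```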
model_compression_toolkit/core/common/network_editors/node_filters.py CHANGED
@@ -15,6 +15,7 @@
 
 from typing import Any
 from model_compression_toolkit.core.common.matchers.node_matcher import BaseNodeMatcher
+from model_compression_toolkit.core.common.graph.base_node import BaseNode
 
 
 class NodeTypeFilter(BaseNodeMatcher):
@@ -30,7 +31,7 @@ class NodeTypeFilter(BaseNodeMatcher):
         """
         self.node_type = node_type
 
-    def apply(self, input_object:
+    def apply(self, input_object: BaseNode) -> bool:
         """
         Check if input_object is of the type that NodeTypeFilter contains.
 
@@ -38,9 +39,9 @@ class NodeTypeFilter(BaseNodeMatcher):
             input_object: Node object to check for its type.
 
         Returns:
-            True if the node
+            True if the node is of the type that was passed during the initialization of NodeTypeFilter.
         """
-        if input_object.
+        if input_object.is_match_type(self.node_type):
            return True
 
 
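With `apply` now delegating to `is_match_type`, a `NodeTypeFilter` can match functional nodes by type name as well as by object identity. A schematic usage sketch (the graph/edit loop is only indicated in comments; the constructor and `apply` signature follow the diff above):

```python
import tensorflow as tf
from model_compression_toolkit.core.common.network_editors.node_filters import NodeTypeFilter

dense_filter = NodeTypeFilter(tf.keras.layers.Dense)

# Schematic use inside a network-editing pass over an MCT graph:
# for node in graph.nodes:
#     if dense_filter.apply(node):      # True via node.is_match_type(Dense)
#         ...edit this node's quantization configuration...
```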
model_compression_toolkit/core/common/quantization/node_quantization_config.py CHANGED
@@ -265,8 +265,6 @@ class WeightsAttrQuantizationConfig:
         self.enable_weights_quantization = weights_attr_cfg.enable_weights_quantization
         self.l_p_value = qc.l_p_value
 
-
-
     @property
     def weights_error_method(self) -> QuantizationErrorMethod:
         """
@@ -412,9 +410,6 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
         for attr in node_attrs_list:
             if isinstance(attr, int):
                 # this is a positional attribute, so it needs to be handled separately.
-                # we assume that a positional attribute is quantized with the default configuration provided in the TPC.
-                if op_cfg.default_weight_attr_config.enable_weights_quantization:
-                    Logger.critical(f"Quantizing constant weights is not supported.")
                 self.pos_attributes_config_mapping[attr] = WeightsAttrQuantizationConfig(qc=qc,
                                                                                           weights_attr_cfg=op_cfg.default_weight_attr_config,
                                                                                           weights_channels_axis=weights_channels_axis)
model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py CHANGED
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 
+from typing import Dict
 import numpy as np
 from sklearn.cluster import KMeans
 
@@ -38,10 +39,10 @@ def lut_kmeans_tensor(tensor_data: np.ndarray,
                       n_iter: int = 10,
                       min_threshold: float = MIN_THRESHOLD,
                       quant_error_method: qc.QuantizationErrorMethod = None,
-                      is_symmetric=False,
+                      is_symmetric: bool = False,
                       node=None,
                       hessian_info_service: HessianInfoService = None,
-                      num_hessian_samples: int = NUM_QPARAM_HESSIAN_SAMPLES) ->
+                      num_hessian_samples: int = NUM_QPARAM_HESSIAN_SAMPLES) -> Dict:
     """
     The quantizer first finds the closest max value per channel of tensor_data.
     Now, we divide tensor_data with the threshold vector per channel. In addition, we scale the result to the range
@@ -101,7 +102,7 @@ def lut_kmeans_histogram(bins: np.ndarray,
                          constrained: bool = True,
                          n_iter: int = 20,
                          min_threshold: float = MIN_THRESHOLD,
-                         quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) ->
+                         quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> Dict:
     """
     Finds quantization cluster points for non-uniform activation quantization.
     The quantizer first finds the closest power-of-two number to the max value of the given histogram,
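The hunks above only tighten signatures (typed `is_symmetric`, explicit `Dict` return), but the docstrings describe the underlying LUT-KMeans idea: normalize the tensor by a threshold and cluster the values into 2^n_bits lookup-table centers. A rough, illustrative sketch of that idea, using a single global threshold for brevity rather than the per-channel thresholds the docstring mentions (the function name and returned keys are placeholders, not the toolkit's implementation):

```python
import numpy as np
from sklearn.cluster import KMeans

def lut_kmeans_sketch(tensor_data: np.ndarray, n_bits: int = 4, n_iter: int = 10) -> dict:
    # Scale by a max-abs threshold, then cluster the scaled values into 2**n_bits LUT centers.
    threshold = np.max(np.abs(tensor_data))
    scaled = tensor_data / threshold
    kmeans = KMeans(n_clusters=2 ** n_bits, n_init=n_iter)
    kmeans.fit(scaled.reshape(-1, 1))
    return {'cluster_centers': kmeans.cluster_centers_.flatten(), 'threshold': threshold}
```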
model_compression_toolkit/core/common/similarity_analyzer.py CHANGED
@@ -235,7 +235,7 @@ def compute_kl_divergence(float_tensor: np.ndarray, fxp_tensor: np.ndarray, batc
                           axis: int = None) -> float:
     """
     Compute the similarity between two tensor using KL-divergence.
-    The returned values is between 0
+    The returned values is between 0 and 1: the smaller returned value,
     the greater similarity there is between the two tensors.
 
     Args:
@@ -257,6 +257,6 @@ def compute_kl_divergence(float_tensor: np.ndarray, fxp_tensor: np.ndarray, batc
     non_zero_fxp_tensor[non_zero_fxp_tensor == 0] = EPS
 
     prob_distance = np.where(float_flat != 0, float_flat * np.log(float_flat / non_zero_fxp_tensor), 0)
-    # The sum is part of the KL-
+    # The sum is part of the KL-Divergence function.
     # The mean is to aggregate the distance between each output probability vectors.
     return np.mean(np.sum(prob_distance, axis=-1), axis=-1)
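For reference, the two lines around the completed comment implement a per-batch KL distance: sum p*log(p/q) over the probability axis, then average over batches. A self-contained version of that computation (EPS here is an assumed stand-in for the toolkit's constant):

```python
import numpy as np

EPS = 1e-8  # assumed small constant; the toolkit defines its own EPS

def kl_divergence_sketch(float_flat: np.ndarray, fxp_flat: np.ndarray) -> float:
    """float_flat / fxp_flat: (batches, probabilities) arrays of probability vectors."""
    non_zero_fxp = fxp_flat.copy()
    non_zero_fxp[non_zero_fxp == 0] = EPS
    # Elementwise p*log(p/q), with 0*log(0/q) taken as 0:
    prob_distance = np.where(float_flat != 0, float_flat * np.log(float_flat / non_zero_fxp), 0)
    # Sum over the probability axis (the KL definition), then average over batches.
    return float(np.mean(np.sum(prob_distance, axis=-1), axis=-1))
```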
model_compression_toolkit/core/keras/back2framework/keras_model_builder.py CHANGED
@@ -39,6 +39,7 @@ from model_compression_toolkit.core.common import BaseNode
 from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
 from model_compression_toolkit.core.keras.back2framework.instance_builder import OperationHandler
 from model_compression_toolkit.core.keras.reader.connectivity_handler import OutTensor
+from mct_quantizers import KerasQuantizationWrapper
 
 # In tf2.3 fake quant node is implemented as TensorFlowOpLayer, while in tf2.4 as TFOpLambda.
 FQ_NODE_OP_V2_3 = 'FakeQuantWithMinMaxVars'
@@ -270,7 +271,9 @@ class KerasModelBuilder(BaseModelBuilder):
                                        out_tensors_of_n_float)
             else:
                 input_tensors = [tensor for tensor_list in input_tensors for tensor in tensor_list]  # flat list of lists
-
+                if not isinstance(op_func, KerasQuantizationWrapper):
+                    # The KerasQuantizationWrapper will insert the quantized positional weights internally.
+                    input_tensors = n.insert_positional_weights_to_input_list(input_tensors)
                 # Build a functional node using its args
                 if isinstance(n, FunctionalNode):
                     if n.inputs_as_list:  # If the first argument should be a list of tensors:
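The new guard only splices positional (constant) weights into the call arguments when the op is not already wrapped, since a `KerasQuantizationWrapper` adds its quantized positional weights by itself. A schematic sketch of that decision in isolation (the helper name below is hypothetical; only the method calls follow the diff):

```python
from mct_quantizers import KerasQuantizationWrapper

def prepare_op_inputs(op_func, node, input_tensors):
    # Only unwrapped ops need their positional weights added back here;
    # the wrapper inserts the quantized versions internally.
    if not isinstance(op_func, KerasQuantizationWrapper):
        input_tensors = node.insert_positional_weights_to_input_list(input_tensors)
    return input_tensors
```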
model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py CHANGED
@@ -70,9 +70,9 @@ def update_kernel_for_bn_folding_fn(conv_node: BaseNode,
     Returns:
         The modified convolution node's weight/kernel/
     """
-    if conv_node.
+    if conv_node.is_match_type(DepthwiseConv2D):
         kernel = kernel * weights_scale.reshape((1, 1, kernel.shape[-2], kernel.shape[-1]))
-    elif conv_node.
+    elif conv_node.is_match_type(Conv2DTranspose):
         kernel = kernel * weights_scale.reshape((1, 1, -1, 1))
     else:
         kernel = kernel * weights_scale.reshape((1, 1, 1, -1))
@@ -98,10 +98,10 @@ def update_weights_for_bn_forward_folding_fn(conv_node: BaseNode,
     Returns:
         The modified convolution node's weight/kernel/
     """
-    if conv_node.
+    if conv_node.is_match_type(DepthwiseConv2D):
         bias_update = kernel * bias_factor.reshape((1, 1, -1, 1))
         kernel = kernel * weights_scale.reshape((1, 1, -1, 1))
-    elif conv_node.
+    elif conv_node.is_match_type(Conv2DTranspose):
         bias_update = (kernel * bias_factor.reshape((1, 1, 1, -1))).sum(3)
         kernel = kernel * weights_scale.reshape((1, 1, 1, -1))
     else:
@@ -133,7 +133,7 @@ def is_group_conv_fn(node: BaseNode) -> bool:
     Returns:
         True if the node is a group convolution, else False
     """
-    return (node.
+    return (node.is_match_type(Conv2D)) and node.framework_attr[GROUPS] > 1
 
 
 def get_foldable_node_type_and_validity_fn(node: BaseNode) -> [bool, bool]:
@@ -147,8 +147,8 @@ def get_foldable_node_type_and_validity_fn(node: BaseNode) -> [bool, bool]:
         is_bn: True if the node is a batch norm, else False
         is_dw_valid: True if the node is a dw-convolution valid for folding or a batch-norm node, else False
     """
-    is_bn = node.
-    is_dw = node.
+    is_bn = node.is_match_type(BatchNormalization)
+    is_dw = node.is_match_type(DepthwiseConv2D)
     is_dw_valid = is_dw and np.all(np.array(node.get_weights_by_keys(DEPTHWISE_KERNEL).shape[:2]) == 1)
     return is_bn, is_dw_valid
 
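The reshape patterns above follow the Keras kernel layouts: DepthwiseConv2D kernels are (H, W, Cin, depth_multiplier), Conv2DTranspose kernels are (H, W, Cout, Cin), and Conv2D kernels are (H, W, Cin, Cout), so the batch-norm scale is broadcast onto the output-channel axis of each. A minimal sketch of the folding arithmetic for the plain and depthwise cases (illustrative only, not the substitution code itself):

```python
import numpy as np

def fold_bn_into_conv(kernel: np.ndarray, bias: np.ndarray,
                      gamma: np.ndarray, beta: np.ndarray,
                      moving_mean: np.ndarray, moving_var: np.ndarray,
                      eps: float = 1e-3, depthwise: bool = False):
    # BN(y) = gamma * (y - mean) / sqrt(var + eps) + beta folds into the preceding conv
    # by scaling each output channel of the kernel and adjusting the bias.
    weights_scale = gamma / np.sqrt(moving_var + eps)
    if depthwise:
        # DepthwiseConv2D kernel: (H, W, Cin, depth_multiplier)
        kernel = kernel * weights_scale.reshape((1, 1, kernel.shape[-2], kernel.shape[-1]))
    else:
        # Conv2D kernel: (H, W, Cin, Cout) -> scale the output-channel axis
        kernel = kernel * weights_scale.reshape((1, 1, 1, -1))
    bias = beta + (bias - moving_mean) * weights_scale
    return kernel, bias
```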
model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py CHANGED
@@ -58,7 +58,7 @@ def conv2d_collapsing_fn(first_node: BaseNode,
     Returns:
         The modified layer node's weights: kernel, bias
     """
-    if first_node.
+    if first_node.is_match_type(Conv2D) and second_node.is_match_type(Conv2D):
         # Get nodes attributes
         kernel1 = first_node.get_weights_by_keys(kernel_str)
         kernel2 = second_node.get_weights_by_keys(kernel_str)
model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py
CHANGED
@@ -49,7 +49,7 @@ def residual_collapsing_fn(first_node: BaseNode,
     Returns:
         The modified layer node's weights: kernel
     """
-    if first_node.
+    if first_node.is_match_type(Conv2D):
         # Get nodes attributes
         kernel = first_node.get_weights_by_keys(kernel_str)
         (kH, kW, Cin, Cout) = kernel.shape