mct-nightly 1.8.0.8042023.post345__py3-none-any.whl → 1.8.0.8052023.post414__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (288)
  1. {mct_nightly-1.8.0.8042023.post345.dist-info → mct_nightly-1.8.0.8052023.post414.dist-info}/METADATA +4 -3
  2. {mct_nightly-1.8.0.8042023.post345.dist-info → mct_nightly-1.8.0.8052023.post414.dist-info}/RECORD +285 -277
  3. model_compression_toolkit/__init__.py +9 -32
  4. model_compression_toolkit/{core/common/constants.py → constants.py} +2 -6
  5. model_compression_toolkit/core/__init__.py +14 -0
  6. model_compression_toolkit/core/analyzer.py +3 -2
  7. model_compression_toolkit/core/common/__init__.py +0 -1
  8. model_compression_toolkit/core/common/collectors/base_collector.py +1 -1
  9. model_compression_toolkit/core/common/collectors/mean_collector.py +1 -1
  10. model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +1 -1
  11. model_compression_toolkit/core/common/framework_implementation.py +1 -8
  12. model_compression_toolkit/core/common/framework_info.py +1 -1
  13. model_compression_toolkit/core/common/fusion/layer_fusing.py +4 -4
  14. model_compression_toolkit/core/common/graph/base_graph.py +2 -2
  15. model_compression_toolkit/core/common/graph/base_node.py +57 -1
  16. model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +1 -1
  17. model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +1 -1
  18. model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +2 -2
  19. model_compression_toolkit/core/common/memory_computation.py +1 -1
  20. model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +3 -5
  21. model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +3 -4
  22. model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +3 -3
  23. model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +1 -1
  24. model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +3 -2
  25. model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +1 -1
  26. model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +1 -1
  27. model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +2 -2
  28. model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +2 -2
  29. model_compression_toolkit/core/common/model_collector.py +2 -2
  30. model_compression_toolkit/core/common/model_validation.py +1 -1
  31. model_compression_toolkit/core/common/network_editors/actions.py +4 -1
  32. model_compression_toolkit/core/common/network_editors/edit_network.py +0 -2
  33. model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +1 -1
  34. model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +3 -4
  35. model_compression_toolkit/core/common/quantization/node_quantization_config.py +3 -3
  36. model_compression_toolkit/core/common/quantization/quantization_config.py +2 -2
  37. model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +1 -1
  38. model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +2 -2
  39. model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +3 -2
  40. model_compression_toolkit/core/common/quantization/quantization_params_generation/kmeans_params.py +1 -1
  41. model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +2 -2
  42. model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +2 -2
  43. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +3 -3
  44. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +1 -1
  45. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +1 -1
  46. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +1 -1
  47. model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +2 -2
  48. model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +2 -2
  49. model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +4 -4
  50. model_compression_toolkit/core/common/quantization/quantize_node.py +2 -2
  51. model_compression_toolkit/core/common/quantization/quantizers/kmeans_quantizer.py +1 -1
  52. model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +1 -1
  53. model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +4 -2
  54. model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +2 -2
  55. model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +7 -7
  56. model_compression_toolkit/core/common/similarity_analyzer.py +2 -2
  57. model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +1 -1
  58. model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +2 -4
  59. model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +5 -5
  60. model_compression_toolkit/core/common/substitutions/apply_substitutions.py +2 -5
  61. model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +2 -2
  62. model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +3 -3
  63. model_compression_toolkit/core/common/substitutions/linear_collapsing.py +1 -1
  64. model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -3
  65. model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +5 -5
  66. model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +1 -1
  67. model_compression_toolkit/core/common/substitutions/weights_activation_split.py +1 -1
  68. model_compression_toolkit/core/common/visualization/tensorboard_writer.py +1 -1
  69. model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +1 -1
  70. model_compression_toolkit/core/keras/back2framework/float_model_builder.py +1 -1
  71. model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +1 -4
  72. model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +1 -1
  73. model_compression_toolkit/core/keras/back2framework/model_gradients.py +2 -2
  74. model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +1 -1
  75. model_compression_toolkit/core/keras/constants.py +0 -7
  76. model_compression_toolkit/core/keras/default_framework_info.py +3 -3
  77. model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +1 -1
  78. model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +1 -1
  79. model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +1 -1
  80. model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +3 -4
  81. model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +2 -1
  82. model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +3 -2
  83. model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  84. model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +1 -1
  85. model_compression_toolkit/core/keras/keras_implementation.py +2 -10
  86. model_compression_toolkit/core/keras/keras_model_validation.py +1 -1
  87. model_compression_toolkit/core/keras/keras_node_prior_info.py +1 -1
  88. model_compression_toolkit/core/keras/kpi_data_facade.py +10 -10
  89. model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +2 -2
  90. model_compression_toolkit/core/keras/quantizer/input_layer_quantize_transform.py +1 -1
  91. model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +2 -2
  92. model_compression_toolkit/core/keras/quantizer/mixed_precision/selective_quantize_config.py +1 -1
  93. model_compression_toolkit/core/keras/reader/common.py +1 -1
  94. model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +1 -1
  95. model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +1 -1
  96. model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +1 -1
  97. model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +1 -1
  98. model_compression_toolkit/core/pytorch/back2framework/model_gradients.py +2 -2
  99. model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +1 -1
  100. model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +1 -1
  101. model_compression_toolkit/core/pytorch/constants.py +0 -6
  102. model_compression_toolkit/core/pytorch/default_framework_info.py +2 -2
  103. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +1 -1
  104. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +1 -1
  105. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +3 -2
  106. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +1 -1
  107. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  108. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +1 -1
  109. model_compression_toolkit/core/pytorch/kpi_data_facade.py +9 -9
  110. model_compression_toolkit/core/pytorch/mixed_precision/mixed_precision_wrapper.py +1 -1
  111. model_compression_toolkit/core/pytorch/pytorch_implementation.py +6 -12
  112. model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +1 -1
  113. model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +2 -2
  114. model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +1 -1
  115. model_compression_toolkit/core/pytorch/reader/graph_builders.py +4 -2
  116. model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +1 -1
  117. model_compression_toolkit/core/runner.py +7 -7
  118. model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +1 -1
  119. model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +1 -1
  120. model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +2 -2
  121. model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +15 -1
  122. model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +1 -1
  123. model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +2 -2
  124. model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py +1 -1
  125. model_compression_toolkit/exporter/model_exporter/tflite/int8_tflite_exporter.py +1 -1
  126. model_compression_toolkit/exporter/model_exporter/tflite/tflite_export_facade.py +2 -2
  127. model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +3 -2
  128. model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +5 -3
  129. model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +2 -2
  130. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +3 -2
  131. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +4 -3
  132. model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +2 -2
  133. model_compression_toolkit/gptq/common/gptq_framework_implementation.py +32 -0
  134. model_compression_toolkit/gptq/common/gptq_graph.py +2 -2
  135. model_compression_toolkit/gptq/common/gptq_training.py +2 -1
  136. model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +29 -0
  137. model_compression_toolkit/gptq/keras/gptq_training.py +5 -4
  138. model_compression_toolkit/gptq/keras/quantization_facade.py +27 -20
  139. model_compression_toolkit/gptq/keras/quantizer/__init__.py +1 -0
  140. model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +2 -2
  141. model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +18 -1
  142. model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +3 -5
  143. model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +2 -2
  144. model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +224 -0
  145. model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +2 -2
  146. model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +29 -0
  147. model_compression_toolkit/gptq/pytorch/gptq_training.py +1 -1
  148. model_compression_toolkit/gptq/pytorch/quantization_facade.py +13 -13
  149. model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +3 -3
  150. model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +8 -3
  151. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +1 -2
  152. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +2 -2
  153. model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +9 -11
  154. model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +2 -2
  155. model_compression_toolkit/gptq/runner.py +3 -2
  156. model_compression_toolkit/{core/keras/quantization_facade.py → legacy/keras_quantization_facade.py} +11 -12
  157. model_compression_toolkit/{core/pytorch/quantization_facade.py → legacy/pytorch_quantization_facade.py} +11 -12
  158. model_compression_toolkit/ptq/__init__.py +3 -0
  159. model_compression_toolkit/ptq/keras/quantization_facade.py +11 -12
  160. model_compression_toolkit/ptq/pytorch/quantization_facade.py +8 -8
  161. model_compression_toolkit/qat/__init__.py +4 -0
  162. model_compression_toolkit/qat/common/__init__.py +1 -2
  163. model_compression_toolkit/qat/common/qat_config.py +3 -1
  164. model_compression_toolkit/qat/keras/quantization_facade.py +18 -20
  165. model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +2 -2
  166. model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +13 -11
  167. model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +9 -9
  168. model_compression_toolkit/qat/pytorch/quantization_facade.py +9 -9
  169. model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +2 -2
  170. model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +4 -3
  171. model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +7 -5
  172. model_compression_toolkit/{qat/common → quantizers_infrastructure}/constants.py +2 -1
  173. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py +1 -1
  174. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +2 -2
  175. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py +2 -2
  176. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py +2 -2
  177. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +3 -3
  178. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +3 -3
  179. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +2 -2
  180. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +3 -3
  181. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py +1 -1
  182. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +2 -2
  183. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +2 -2
  184. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +2 -2
  185. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +2 -2
  186. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +2 -2
  187. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py +1 -1
  188. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +2 -2
  189. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +2 -2
  190. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +2 -2
  191. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +2 -2
  192. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +2 -2
  193. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py +2 -2
  194. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py +1 -1
  195. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py +2 -2
  196. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py +2 -2
  197. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +2 -2
  198. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +2 -2
  199. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +2 -2
  200. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +2 -2
  201. model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +3 -3
  202. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +9 -9
  203. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py +2 -1
  204. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +4 -6
  205. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/trainable_quantizer_config.py +1 -1
  206. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +2 -2
  207. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/config_serialization.py +1 -1
  208. model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +2 -2
  209. model_compression_toolkit/target_platform_capabilities/constants.py +27 -0
  210. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/__init__.py +5 -5
  211. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/current_tp_model.py +1 -1
  212. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/fusing.py +2 -2
  213. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/operators.py +4 -4
  214. model_compression_toolkit/target_platform_capabilities/target_platform/quantization_format.py +20 -0
  215. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/target_platform_model.py +16 -7
  216. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/target_platform_model_component.py +1 -1
  217. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/__init__.py +5 -5
  218. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/attribute_filter.py +1 -1
  219. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/layer_filter_params.py +33 -35
  220. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/operations_to_layers.py +4 -4
  221. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/target_platform_capabilities.py +9 -30
  222. model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/target_platform_capabilities_component.py +1 -1
  223. model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  224. model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/latest/__init__.py +25 -0
  225. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/target_platform_capabilities.py +19 -17
  226. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v1/tp_model.py +7 -1
  227. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v1/tpc_keras.py +2 -2
  228. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v1/tpc_pytorch.py +2 -2
  229. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v2/tp_model.py +7 -1
  230. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v2/tpc_keras.py +2 -2
  231. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v2/tpc_pytorch.py +2 -2
  232. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3/tp_model.py +7 -1
  233. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3/tpc_keras.py +2 -2
  234. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3/tpc_pytorch.py +2 -2
  235. model_compression_toolkit/{core/tpc_models/default_tpc/v4_lut → target_platform_capabilities/tpc_models/default_tpc/v3_lut}/tp_model.py +7 -2
  236. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3_lut/tpc_keras.py +2 -2
  237. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3_lut/tpc_pytorch.py +2 -2
  238. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4/tp_model.py +7 -1
  239. model_compression_toolkit/{core/tpc_models/default_tpc/v5 → target_platform_capabilities/tpc_models/default_tpc/v4}/tpc_keras.py +2 -3
  240. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4/tpc_pytorch.py +2 -2
  241. model_compression_toolkit/{core/tpc_models/default_tpc/v3_lut → target_platform_capabilities/tpc_models/default_tpc/v4_lut}/tp_model.py +7 -2
  242. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4_lut/tpc_keras.py +2 -2
  243. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4_lut/tpc_pytorch.py +2 -2
  244. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v5/tp_model.py +7 -1
  245. model_compression_toolkit/{core/tpc_models/default_tpc/v4 → target_platform_capabilities/tpc_models/default_tpc/v5}/tpc_keras.py +2 -2
  246. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v5/tpc_pytorch.py +2 -2
  247. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/get_target_platform_capabilities.py +6 -8
  248. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +14 -0
  249. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/latest/__init__.py +6 -6
  250. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/target_platform_capabilities.py +6 -5
  251. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/v1/tp_model.py +7 -1
  252. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/v1/tpc_keras.py +2 -2
  253. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/v1/tpc_pytorch.py +2 -2
  254. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +22 -0
  255. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/target_platform_capabilities.py +6 -5
  256. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/v1/tp_model.py +7 -1
  257. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/v1/tpc_keras.py +2 -2
  258. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +2 -2
  259. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +22 -0
  260. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/target_platform_capabilities.py +6 -5
  261. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/v1/tp_model.py +26 -18
  262. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/v1/tpc_keras.py +3 -3
  263. model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/v1/tpc_pytorch.py +3 -3
  264. model_compression_toolkit/core/tpc_models/default_tpc/latest/__init__.py +0 -25
  265. model_compression_toolkit/core/tpc_models/qnnpack_tpc/latest/__init__.py +0 -22
  266. model_compression_toolkit/core/tpc_models/tflite_tpc/latest/__init__.py +0 -22
  267. {mct_nightly-1.8.0.8042023.post345.dist-info → mct_nightly-1.8.0.8052023.post414.dist-info}/LICENSE.md +0 -0
  268. {mct_nightly-1.8.0.8042023.post345.dist-info → mct_nightly-1.8.0.8052023.post414.dist-info}/WHEEL +0 -0
  269. {mct_nightly-1.8.0.8042023.post345.dist-info → mct_nightly-1.8.0.8052023.post414.dist-info}/top_level.txt +0 -0
  270. /model_compression_toolkit/{core/tpc_models/imx500_tpc → legacy}/__init__.py +0 -0
  271. /model_compression_toolkit/{core/common/logger.py → logger.py} +0 -0
  272. /model_compression_toolkit/{core/tpc_models → target_platform_capabilities}/__init__.py +0 -0
  273. /model_compression_toolkit/{core/common → target_platform_capabilities}/immutable.py +0 -0
  274. /model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/op_quantization_config.py +0 -0
  275. /model_compression_toolkit/{core/common → target_platform_capabilities}/target_platform/targetplatform2framework/current_tpc.py +0 -0
  276. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/__init__.py +0 -0
  277. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v1/__init__.py +0 -0
  278. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v2/__init__.py +0 -0
  279. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3/__init__.py +0 -0
  280. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v3_lut/__init__.py +0 -0
  281. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4/__init__.py +0 -0
  282. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v4_lut/__init__.py +0 -0
  283. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/default_tpc/v5/__init__.py +0 -0
  284. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  285. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/__init__.py +0 -0
  286. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  287. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/__init__.py +0 -0
  288. /model_compression_toolkit/{core → target_platform_capabilities}/tpc_models/tflite_tpc/v1/__init__.py +0 -0
@@ -18,13 +18,15 @@ from typing import Union
18
18
  import numpy as np
19
19
  import tensorflow as tf
20
20
  from tensorflow.python.framework.tensor_shape import TensorShape
21
- from model_compression_toolkit.core.common.constants import SIGNED
21
+ from model_compression_toolkit.constants import SIGNED
22
+ from model_compression_toolkit.quantizers_infrastructure.constants import FQ_MIN, FQ_MAX
22
23
 
23
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
24
+ from model_compression_toolkit.qat import TrainingMethod
25
+
26
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
24
27
  from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
25
- from model_compression_toolkit.qat.common.constants import FQ_MIN, FQ_MAX
26
- from model_compression_toolkit import quantizers_infrastructure as qi, TrainingMethod
27
- from model_compression_toolkit.core.common import constants as C
28
+ from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
29
+
28
30
  from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
29
31
  from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
30
32
  TrainableQuantizerActivationConfig
@@ -53,11 +55,11 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
53
55
  """
54
56
  super().__init__(quantization_config)
55
57
  self.power_of_two = quantization_config.weights_quantization_method == QuantizationMethod.POWER_OF_TWO
56
- self.threshold_values = quantization_config.weights_quantization_params[C.THRESHOLD]
58
+ self.threshold_values = np.array(quantization_config.weights_quantization_params[C.THRESHOLD])
57
59
  self.threshold_shape = np.asarray(self.threshold_values).shape
58
60
  self.per_channel = self.quantization_config.weights_per_channel_threshold
59
61
  self.channel_axis = self.quantization_config.weights_channels_axis
60
- self.np_threshold_values = np.reshape(np.asarray(self.threshold_values),[-1]) if self.channel_axis else float(self.threshold_values)
62
+ self.np_threshold_values = np.reshape(np.asarray(self.threshold_values),[-1]) if self.per_channel else float(self.threshold_values)
61
63
 
62
64
  if self.per_channel and self.channel_axis not in [-1, len(self.threshold_shape) - 1]:
63
65
  # Tensorflow's fake_quant_with_min_max_vars_per_channel only works on last axis, so
@@ -93,21 +95,21 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
93
95
  """
94
96
  ptq_threshold_tensor = layer.add_weight(
95
97
  name + THRESHOLD_TENSOR,
96
- shape=len(self.np_threshold_values) if self.channel_axis else (),
98
+ shape=len(self.np_threshold_values) if self.per_channel else (),
97
99
  initializer=tf.keras.initializers.Constant(1.0),
98
100
  trainable=False)
99
101
  ptq_threshold_tensor.assign(self.np_threshold_values)
100
102
 
101
103
  fq_min = layer.add_weight(
102
104
  name + FQ_MIN,
103
- shape=len(self.min) if self.channel_axis else (),
105
+ shape=len(self.min) if self.per_channel else (),
104
106
  initializer=tf.keras.initializers.Constant(-1.0),
105
107
  trainable=False)
106
108
  fq_min.assign(self.min)
107
109
 
108
110
  fq_max = layer.add_weight(
109
111
  name + FQ_MAX,
110
- shape=len(self.max) if self.channel_axis else (),
112
+ shape=len(self.max) if self.per_channel else (),
111
113
  initializer=tf.keras.initializers.Constant(1.0),
112
114
  trainable=False)
113
115
  fq_max.assign(self.max)
@@ -134,7 +136,7 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
134
136
 
135
137
  _min = self.get_quantizer_variable(FQ_MIN)
136
138
  _max = self.get_quantizer_variable(FQ_MAX)
137
- if self.channel_axis:
139
+ if self.per_channel:
138
140
  if self.perm_vec:
139
141
  inputs = tf.transpose(inputs, perm=self.perm_vec)
140
142
  q_tensor = tf.quantization.fake_quant_with_min_max_vars_per_channel(inputs, _min, _max,
@@ -15,13 +15,15 @@
15
15
  import numpy as np
16
16
  import tensorflow as tf
17
17
  from tensorflow.python.framework.tensor_shape import TensorShape
18
- from model_compression_toolkit.core.common.constants import RANGE_MIN, RANGE_MAX
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
20
- from model_compression_toolkit.qat.common.constants import FQ_MIN, FQ_MAX
18
+ from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
19
+ from model_compression_toolkit.quantizers_infrastructure.constants import FQ_MIN, FQ_MAX
20
+ from model_compression_toolkit.qat import TrainingMethod
21
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
+
21
23
  from model_compression_toolkit.qat.keras.quantizer.quant_utils import adjust_range_to_include_zero
22
24
  from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero
23
- from model_compression_toolkit import quantizers_infrastructure as qi, TrainingMethod
24
- from model_compression_toolkit.core.common import constants as C
25
+ from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
26
+
25
27
  from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
26
28
  from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
27
29
  TrainableQuantizerActivationConfig
@@ -50,8 +52,8 @@ class STEUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
50
52
 
51
53
  """
52
54
  super().__init__(quantization_config)
53
- self.max_values = quantization_config.weights_quantization_params[RANGE_MAX]
54
- self.min_values = quantization_config.weights_quantization_params[RANGE_MIN]
55
+ self.max_values = np.array(quantization_config.weights_quantization_params[RANGE_MAX])
56
+ self.min_values = np.array(quantization_config.weights_quantization_params[RANGE_MIN])
55
57
  self.num_bits = self.quantization_config.weights_n_bits
56
58
  self.per_channel = self.quantization_config.weights_per_channel_threshold
57
59
  self.channel_axis = self.quantization_config.weights_channels_axis
@@ -98,7 +100,6 @@ class STEUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
98
100
  self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
99
101
  self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
100
102
 
101
-
102
103
  def __call__(self, inputs: tf.Tensor,
103
104
  training: bool):
104
105
  """
@@ -199,7 +200,6 @@ class STEUniformActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
199
200
  self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
200
201
  self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
201
202
 
202
-
203
203
  def __call__(self,
204
204
  inputs: tf.Tensor,
205
205
  training: bool):
@@ -16,16 +16,16 @@ import copy
16
16
  from typing import Callable
17
17
  from functools import partial
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TORCH, PYTORCH
19
+ from model_compression_toolkit.constants import FOUND_TORCH, PYTORCH
20
20
 
21
- from model_compression_toolkit import CoreConfig
21
+ from model_compression_toolkit.core import CoreConfig
22
22
  from model_compression_toolkit.core import common
23
- from model_compression_toolkit.core.common import Logger
23
+ from model_compression_toolkit.logger import Logger
24
24
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
25
25
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
26
26
  from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
27
27
  MixedPrecisionQuantizationConfigV2
28
- from model_compression_toolkit.core.common.target_platform.targetplatform2framework import TargetPlatformCapabilities
28
+ from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
29
29
  from model_compression_toolkit.core.runner import core_runner, _init_tensorboard_writer
30
30
  from model_compression_toolkit.ptq.runner import ptq_runner
31
31
 
@@ -34,7 +34,7 @@ if FOUND_TORCH:
34
34
  import torch.nn as nn
35
35
  from torch.nn import Module
36
36
  from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
37
- from model_compression_toolkit.core.pytorch.constants import DEFAULT_TP_MODEL
37
+ from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
38
38
  from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
39
39
  from model_compression_toolkit.qat.common.qat_config import _is_qat_applicable
40
40
  from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
@@ -121,7 +121,7 @@ if FOUND_TORCH:
121
121
 
122
122
  Create a MCT core config, containing the quantization configuration:
123
123
 
124
- >>> config = mct.CoreConfig()
124
+ >>> config = mct.core.CoreConfig()
125
125
 
126
126
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
127
127
  quantized model. Now the model contains quantizer wrappers for fine tunning the weights:
@@ -134,11 +134,11 @@ if FOUND_TORCH:
134
134
 
135
135
  if core_config.mixed_precision_enable:
136
136
  if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
137
- common.Logger.error("Given quantization config to mixed-precision facade is not of type "
137
+ Logger.error("Given quantization config to mixed-precision facade is not of type "
138
138
  "MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,"
139
139
  "or pass a valid mixed precision configuration.")
140
140
 
141
- common.Logger.info("Using experimental mixed-precision quantization. "
141
+ Logger.info("Using experimental mixed-precision quantization. "
142
142
  "If you encounter an issue please file a bug.")
143
143
 
144
144
  tb_w = _init_tensorboard_writer(fw_info)
@@ -193,7 +193,7 @@ if FOUND_TORCH:
193
193
 
194
194
  Create a MCT core config, containing the quantization configuration:
195
195
 
196
- >>> config = mct.CoreConfig()
196
+ >>> config = mct.core.CoreConfig()
197
197
 
198
198
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
199
199
  quantized model:
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  from typing import Union
16
16
 
17
- from model_compression_toolkit.core.common.logger import Logger
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
17
+ from model_compression_toolkit.logger import Logger
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
19
 
20
20
  from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
21
21
  TrainableQuantizerActivationConfig
@@ -18,12 +18,13 @@ import numpy as np
18
18
  import torch
19
19
  import torch.nn as nn
20
20
 
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
21
+ from model_compression_toolkit.qat import TrainingMethod
22
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
23
  from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
23
- from model_compression_toolkit import quantizers_infrastructure as qi, TrainingMethod
24
+ from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
24
25
  from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_quantizer import BasePytorchQATTrainableQuantizer
25
26
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
26
- from model_compression_toolkit.core.common import constants as C
27
+
27
28
  from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
28
29
  from model_compression_toolkit.qat.pytorch.quantizer.quantizer_utils import ste_round, ste_clip, symmetric_quantizer
29
30
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.pytorch.quantizers import \
@@ -17,11 +17,13 @@ import torch
17
17
  import torch.nn as nn
18
18
  from torch import Tensor
19
19
 
20
- from model_compression_toolkit.core.common.constants import RANGE_MAX, RANGE_MIN
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
22
- from model_compression_toolkit.qat.common.constants import FQ_MIN, FQ_MAX
23
- from model_compression_toolkit.core.common import constants as C
24
- from model_compression_toolkit import quantizers_infrastructure as qi, TrainingMethod
20
+ from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN
21
+ from model_compression_toolkit.quantizers_infrastructure.constants import FQ_MIN, FQ_MAX
22
+
23
+ from model_compression_toolkit.qat import TrainingMethod
24
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
25
+ from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
26
+
25
27
  from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_quantizer import BasePytorchQATTrainableQuantizer
26
28
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
27
29
  from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
@@ -1,4 +1,4 @@
1
- # Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
1
+ # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
2
2
  #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
4
  # you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
+ # Quantizers constants (for GPTQ, QAT, etc.)
16
17
  FQ_MIN = "min"
17
18
  FQ_MAX = "max"
18
19
  THRESHOLD_TENSOR = "ptq_threshold_tensor"
@@ -15,7 +15,7 @@
15
15
  from enum import Enum
16
16
  from typing import Any, Dict, List
17
17
 
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
 
20
20
 
21
21
  class QuantizationTarget(Enum):
@@ -13,8 +13,8 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.core.common import Logger
17
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
16
+ from model_compression_toolkit.logger import Logger
17
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
18
18
  from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import QUANTIZATION_TARGET, \
20
20
  QUANTIZATION_METHOD
@@ -12,8 +12,8 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
- from model_compression_toolkit.core.common import Logger
16
- from model_compression_toolkit.core.common.constants import FOUND_TF
15
+ from model_compression_toolkit.logger import Logger
16
+ from model_compression_toolkit.constants import FOUND_TF
17
17
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.get_all_subclasses import get_all_subclasses
18
18
 
19
19
  if FOUND_TF:
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  from typing import Dict, List, Any, Tuple
16
16
  from model_compression_toolkit import quantizers_infrastructure as qi
17
- from model_compression_toolkit.core.common.constants import FOUND_TF
18
- from model_compression_toolkit.core.common.logger import Logger
17
+ from model_compression_toolkit.constants import FOUND_TF
18
+ from model_compression_toolkit.logger import Logger
19
19
  from model_compression_toolkit.quantizers_infrastructure import BaseInferableQuantizer
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import WEIGHTS_QUANTIZERS, ACTIVATION_QUANTIZERS, LAYER, STEPS, TRAINING
21
21
 
@@ -17,10 +17,10 @@ from typing import List
17
17
 
18
18
  import numpy as np
19
19
 
20
- from model_compression_toolkit.core.common.logger import Logger
21
- from model_compression_toolkit.core.common.constants import FOUND_TF
20
+ from model_compression_toolkit.logger import Logger
21
+ from model_compression_toolkit.constants import FOUND_TF
22
22
 
23
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
23
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
24
24
  from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget
25
25
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
26
26
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import MULTIPLIER_N_BITS, EPS
@@ -16,10 +16,10 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.logger import Logger
20
- from model_compression_toolkit.core.common.constants import FOUND_TF
19
+ from model_compression_toolkit.logger import Logger
20
+ from model_compression_toolkit.constants import FOUND_TF
21
21
 
22
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
22
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
23
23
  from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget
24
24
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
25
25
 
@@ -16,9 +16,9 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TF
19
+ from model_compression_toolkit.constants import FOUND_TF
20
20
 
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
21
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
22
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
23
23
  QuantizationTarget
24
24
 
@@ -16,9 +16,9 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.logger import Logger
20
- from model_compression_toolkit.core.common.constants import FOUND_TF
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
19
+ from model_compression_toolkit.logger import Logger
20
+ from model_compression_toolkit.constants import FOUND_TF
21
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
22
  from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget
23
23
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
24
24
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.quant_utils import \
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from abc import abstractmethod
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TF
17
+ from model_compression_toolkit.constants import FOUND_TF
18
18
  from model_compression_toolkit.quantizers_infrastructure import BaseInferableQuantizer
19
19
 
20
20
  if FOUND_TF:
@@ -16,8 +16,8 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TF
20
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
19
+ from model_compression_toolkit.constants import FOUND_TF
20
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
21
21
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
22
22
  QuantizationTarget
23
23
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import MULTIPLIER_N_BITS, EPS
@@ -17,8 +17,8 @@ from typing import List
17
17
 
18
18
  import numpy as np
19
19
 
20
- from model_compression_toolkit.core.common.constants import FOUND_TF
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
20
+ from model_compression_toolkit.constants import FOUND_TF
21
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
22
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
23
23
  QuantizationTarget
24
24
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import MULTIPLIER_N_BITS, EPS
@@ -16,8 +16,8 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TF
20
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
19
+ from model_compression_toolkit.constants import FOUND_TF
20
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
21
21
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, QuantizationTarget
22
22
 
23
23
  if FOUND_TF:
@@ -16,9 +16,9 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TF
19
+ from model_compression_toolkit.constants import FOUND_TF
20
20
 
21
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
21
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
22
22
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, QuantizationTarget
23
23
 
24
24
  if FOUND_TF:
@@ -16,8 +16,8 @@ from typing import List
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common.constants import FOUND_TF
20
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
19
+ from model_compression_toolkit.constants import FOUND_TF
20
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
21
21
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, QuantizationTarget
22
22
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.quant_utils import \
23
23
  adjust_range_to_include_zero
@@ -16,7 +16,7 @@ from typing import Any
16
16
 
17
17
  import numpy as np
18
18
 
19
- from model_compression_toolkit.core.common import Logger
19
+ from model_compression_toolkit.logger import Logger
20
20
 
21
21
 
22
22
  def validate_uniform_min_max_ranges(min_range: Any, max_range: Any) -> None:
@@ -13,8 +13,8 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================f
15
15
  from typing import List, Union, Any, Dict, Tuple
16
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
17
- from model_compression_toolkit.core.common.logger import Logger
16
+ from model_compression_toolkit.constants import FOUND_TORCH
17
+ from model_compression_toolkit.logger import Logger
18
18
  from model_compression_toolkit.quantizers_infrastructure import BaseInferableQuantizer
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants import LAYER, TRAINING
20
20
  import inspect
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  import numpy as np
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer \
20
20
  import mark_quantizer, QuantizationTarget
21
21
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants \
@@ -15,8 +15,8 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
21
21
  QuantizationTarget
22
22
 
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  import numpy as np
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
20
20
  QuantizationTarget
21
21
 
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  import numpy as np
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
20
20
  QuantizationTarget
21
21
 
@@ -15,8 +15,8 @@
15
15
  import numpy as np
16
16
  import warnings
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer \
21
21
  import mark_quantizer
22
22
 
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from abc import abstractmethod
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
18
  from model_compression_toolkit.quantizers_infrastructure import BaseInferableQuantizer
19
19
 
20
20
  if FOUND_TORCH:
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  import numpy as np
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
20
20
 
21
21
  if FOUND_TORCH:
@@ -14,8 +14,8 @@
14
14
  # ==============================================================================
15
15
  import numpy as np
16
16
 
17
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
18
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
17
+ from model_compression_toolkit.constants import FOUND_TORCH
18
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
19
19
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
20
20
 
21
21
  if FOUND_TORCH:
@@ -15,8 +15,8 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer \
21
21
  import mark_quantizer, QuantizationTarget
22
22
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.constants \
@@ -15,8 +15,8 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer \
21
21
  import mark_quantizer, \
22
22
  QuantizationTarget
@@ -15,8 +15,8 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
21
21
  QuantizationTarget
22
22
 
@@ -15,8 +15,8 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
20
20
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer, \
21
21
  QuantizationTarget
22
22
 
@@ -15,9 +15,9 @@
15
15
 
16
16
  import numpy as np
17
17
 
18
- from model_compression_toolkit.core.common.constants import FOUND_TORCH
19
- from model_compression_toolkit.core.common.logger import Logger
20
- from model_compression_toolkit.core.common.target_platform import QuantizationMethod
18
+ from model_compression_toolkit.constants import FOUND_TORCH
19
+ from model_compression_toolkit.logger import Logger
20
+ from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
21
21
  from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import QuantizationTarget, \
22
22
  mark_quantizer
23
23