mct-nightly 2.1.0.20240731.414__py3-none-any.whl → 2.1.0.20240802.429__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51):
  1. {mct_nightly-2.1.0.20240731.414.dist-info → mct_nightly-2.1.0.20240802.429.dist-info}/METADATA +1 -1
  2. {mct_nightly-2.1.0.20240731.414.dist-info → mct_nightly-2.1.0.20240802.429.dist-info}/RECORD +51 -47
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/constants.py +2 -1
  5. model_compression_toolkit/core/common/framework_implementation.py +5 -9
  6. model_compression_toolkit/core/common/graph/base_graph.py +1 -23
  7. model_compression_toolkit/core/common/graph/base_node.py +52 -33
  8. model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +6 -6
  9. model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +12 -12
  10. model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +2 -2
  11. model_compression_toolkit/core/common/quantization/node_quantization_config.py +17 -38
  12. model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +6 -4
  13. model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +19 -12
  14. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +14 -14
  15. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +14 -9
  16. model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +5 -27
  17. model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +25 -17
  18. model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +10 -6
  19. model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +1 -65
  20. model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +12 -5
  21. model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +7 -5
  22. model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +3 -3
  23. model_compression_toolkit/core/keras/keras_implementation.py +21 -17
  24. model_compression_toolkit/core/keras/tf_tensor_numpy.py +2 -2
  25. model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +3 -3
  26. model_compression_toolkit/core/pytorch/pytorch_implementation.py +15 -14
  27. model_compression_toolkit/core/pytorch/reader/node_holders.py +1 -1
  28. model_compression_toolkit/core/runner.py +1 -0
  29. model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +2 -2
  30. model_compression_toolkit/gptq/common/gptq_training.py +0 -35
  31. model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +1 -1
  32. model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +1 -1
  33. model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +32 -8
  34. model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +2 -2
  35. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +5 -0
  36. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +2 -0
  37. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +2 -0
  38. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +2 -0
  39. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +2 -0
  40. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +2 -0
  41. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +2 -0
  42. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +2 -0
  43. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +16 -0
  44. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +235 -0
  45. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +132 -0
  46. model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +112 -0
  47. model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +2 -0
  48. model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +2 -0
  49. {mct_nightly-2.1.0.20240731.414.dist-info → mct_nightly-2.1.0.20240802.429.dist-info}/LICENSE.md +0 -0
  50. {mct_nightly-2.1.0.20240731.414.dist-info → mct_nightly-2.1.0.20240802.429.dist-info}/WHEEL +0 -0
  51. {mct_nightly-2.1.0.20240731.414.dist-info → mct_nightly-2.1.0.20240802.429.dist-info}/top_level.txt +0 -0
@@ -88,6 +88,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
         attr_weights_configs_mapping={},
         activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
+        supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
         quantization_preserving=False,
         fixed_scale=None,
@@ -100,6 +101,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
+        supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
         quantization_preserving=False,
         fixed_scale=None,