mct-nightly 1.8.0.22042023.post414__py3-none-any.whl → 1.8.0.22052023.post408__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-1.8.0.22042023.post414.dist-info → mct_nightly-1.8.0.22052023.post408.dist-info}/METADATA +1 -1
- {mct_nightly-1.8.0.22042023.post414.dist-info → mct_nightly-1.8.0.22052023.post408.dist-info}/RECORD +237 -230
- model_compression_toolkit/__init__.py +8 -31
- model_compression_toolkit/{core/common/constants.py → constants.py} +2 -6
- model_compression_toolkit/core/__init__.py +14 -0
- model_compression_toolkit/core/analyzer.py +3 -2
- model_compression_toolkit/core/common/__init__.py +0 -1
- model_compression_toolkit/core/common/collectors/base_collector.py +1 -1
- model_compression_toolkit/core/common/collectors/mean_collector.py +1 -1
- model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +1 -1
- model_compression_toolkit/core/common/framework_implementation.py +1 -8
- model_compression_toolkit/core/common/fusion/layer_fusing.py +2 -2
- model_compression_toolkit/core/common/graph/base_graph.py +1 -1
- model_compression_toolkit/core/common/graph/base_node.py +57 -1
- model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +1 -1
- model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +1 -1
- model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +2 -2
- model_compression_toolkit/core/common/memory_computation.py +1 -1
- model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +1 -1
- model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +2 -3
- model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +3 -3
- model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +1 -1
- model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +3 -2
- model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +1 -1
- model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +1 -1
- model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +2 -2
- model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +2 -2
- model_compression_toolkit/core/common/model_collector.py +2 -2
- model_compression_toolkit/core/common/model_validation.py +1 -1
- model_compression_toolkit/core/common/network_editors/actions.py +4 -1
- model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +1 -1
- model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +1 -1
- model_compression_toolkit/core/common/quantization/node_quantization_config.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_config.py +2 -2
- model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/kmeans_params.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +2 -2
- model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +2 -2
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +1 -1
- model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +1 -1
- model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +2 -1
- model_compression_toolkit/core/common/quantization/quantize_node.py +2 -2
- model_compression_toolkit/core/common/quantization/quantizers/kmeans_quantizer.py +1 -1
- model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +1 -1
- model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +4 -2
- model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +2 -2
- model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +3 -2
- model_compression_toolkit/core/common/similarity_analyzer.py +2 -2
- model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +4 -3
- model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +3 -2
- model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +1 -1
- model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +2 -2
- model_compression_toolkit/core/common/substitutions/linear_collapsing.py +1 -1
- model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +4 -4
- model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +1 -1
- model_compression_toolkit/core/common/substitutions/weights_activation_split.py +1 -1
- model_compression_toolkit/core/common/visualization/tensorboard_writer.py +1 -1
- model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +1 -1
- model_compression_toolkit/core/keras/back2framework/float_model_builder.py +1 -1
- model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +66 -21
- model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +1 -1
- model_compression_toolkit/core/keras/back2framework/model_gradients.py +2 -2
- model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +1 -1
- model_compression_toolkit/core/keras/constants.py +0 -7
- model_compression_toolkit/core/keras/default_framework_info.py +2 -2
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +1 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +1 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +1 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +3 -4
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +2 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +3 -2
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +1 -1
- model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +1 -1
- model_compression_toolkit/core/keras/keras_implementation.py +2 -10
- model_compression_toolkit/core/keras/keras_model_validation.py +1 -1
- model_compression_toolkit/core/keras/keras_node_prior_info.py +1 -1
- model_compression_toolkit/core/keras/kpi_data_facade.py +7 -7
- model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +2 -2
- model_compression_toolkit/core/keras/quantizer/input_layer_quantize_transform.py +1 -1
- model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +2 -2
- model_compression_toolkit/core/keras/quantizer/mixed_precision/selective_quantize_config.py +1 -1
- model_compression_toolkit/core/keras/reader/common.py +1 -1
- model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/model_gradients.py +2 -2
- model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +1 -1
- model_compression_toolkit/core/pytorch/constants.py +0 -6
- model_compression_toolkit/core/pytorch/default_framework_info.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +3 -2
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +1 -1
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +1 -1
- model_compression_toolkit/core/pytorch/kpi_data_facade.py +6 -6
- model_compression_toolkit/core/pytorch/mixed_precision/mixed_precision_wrapper.py +1 -1
- model_compression_toolkit/core/pytorch/pytorch_implementation.py +1 -9
- model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +1 -1
- model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +2 -2
- model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +1 -1
- model_compression_toolkit/core/pytorch/reader/graph_builders.py +3 -2
- model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +1 -1
- model_compression_toolkit/core/runner.py +6 -6
- model_compression_toolkit/exporter/__init__.py +6 -3
- model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +20 -0
- model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/{tflite → keras}/fakely_quant_tflite_exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/{tflite → keras}/int8_tflite_exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +60 -22
- model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +20 -0
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +15 -1
- model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +1 -1
- model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +54 -31
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +5 -3
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +4 -2
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +2 -2
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +3 -2
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +3 -2
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +2 -2
- model_compression_toolkit/gptq/common/gptq_framework_implementation.py +32 -0
- model_compression_toolkit/gptq/common/gptq_graph.py +2 -2
- model_compression_toolkit/gptq/common/gptq_training.py +5 -4
- model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +29 -0
- model_compression_toolkit/gptq/keras/gptq_training.py +41 -14
- model_compression_toolkit/gptq/keras/graph_info.py +4 -0
- model_compression_toolkit/gptq/keras/quantization_facade.py +26 -19
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +2 -2
- model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +1 -1
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +1 -1
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +2 -2
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +1 -1
- model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +29 -0
- model_compression_toolkit/gptq/pytorch/gptq_training.py +1 -1
- model_compression_toolkit/gptq/pytorch/quantization_facade.py +11 -11
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +3 -3
- model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +1 -3
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +1 -1
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +2 -2
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +1 -1
- model_compression_toolkit/gptq/runner.py +3 -2
- model_compression_toolkit/{exporter/model_exporter/tflite → legacy}/__init__.py +1 -1
- model_compression_toolkit/{core/keras/quantization_facade.py → legacy/keras_quantization_facade.py} +8 -9
- model_compression_toolkit/{core/pytorch/quantization_facade.py → legacy/pytorch_quantization_facade.py} +8 -9
- model_compression_toolkit/ptq/__init__.py +3 -0
- model_compression_toolkit/ptq/keras/quantization_facade.py +10 -11
- model_compression_toolkit/ptq/pytorch/quantization_facade.py +7 -7
- model_compression_toolkit/qat/__init__.py +4 -0
- model_compression_toolkit/qat/common/__init__.py +1 -2
- model_compression_toolkit/qat/common/qat_config.py +5 -1
- model_compression_toolkit/qat/keras/quantization_facade.py +33 -27
- model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +2 -2
- model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +31 -4
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +12 -10
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +8 -8
- model_compression_toolkit/qat/pytorch/quantization_facade.py +8 -8
- model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +2 -2
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +3 -2
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +6 -4
- model_compression_toolkit/quantizers_infrastructure/__init__.py +2 -2
- model_compression_toolkit/{qat/common → quantizers_infrastructure}/constants.py +2 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py +5 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/activation_quantization_holder.py +147 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py +5 -5
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +1 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +9 -9
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py +2 -1
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +3 -5
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +2 -2
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +2 -2
- model_compression_toolkit/target_platform_capabilities/constants.py +27 -0
- model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +1 -1
- model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +1 -1
- model_compression_toolkit/target_platform_capabilities/target_platform/quantization_format.py +20 -0
- model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +11 -2
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +1 -1
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +32 -34
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +2 -2
- model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +3 -24
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/latest/__init__.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/target_platform_capabilities.py +3 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v1/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v2/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v3/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v3_lut/tp_model.py +7 -2
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v4/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v4_lut/tp_model.py +7 -2
- model_compression_toolkit/target_platform_capabilities/tpc_models/default_tpc/v5/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +1 -3
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +2 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +2 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +7 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +1 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +2 -1
- model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +26 -18
- model_compression_toolkit/exporter/model_exporter/tflite/tflite_export_facade.py +0 -73
- {mct_nightly-1.8.0.22042023.post414.dist-info → mct_nightly-1.8.0.22052023.post408.dist-info}/LICENSE.md +0 -0
- {mct_nightly-1.8.0.22042023.post414.dist-info → mct_nightly-1.8.0.22052023.post408.dist-info}/WHEEL +0 -0
- {mct_nightly-1.8.0.22042023.post414.dist-info → mct_nightly-1.8.0.22052023.post408.dist-info}/top_level.txt +0 -0
- /model_compression_toolkit/{core/common/logger.py → logger.py} +0 -0
- /model_compression_toolkit/{core/common → target_platform_capabilities}/immutable.py +0 -0
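The biggest structural change in this release is a package-layout reorganization: `constants.py` and `logger.py` move from `core/common/` to the package root, `DEFAULT_TP_MODEL` moves into a new `target_platform_capabilities/constants.py`, the old `core/*/quantization_facade.py` entry points move to a new `legacy/` module, and the TFLite exporters move under `exporter/model_exporter/keras/`. A minimal sketch of what this means for imports, with paths taken from the rename list above and the public namespaces from the `__init__.py` hunks below:

    import model_compression_toolkit as mct

    # Internal modules relocated to the package root in this release:
    from model_compression_toolkit.constants import FOUND_TF, FOUND_TORCH  # was core.common.constants
    from model_compression_toolkit.logger import Logger                    # was core.common.logger
    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL

    # Public API now grouped by sub-package (old flat names in comments):
    config = mct.core.CoreConfig()  # was mct.CoreConfig()
    # mct.ptq.keras_post_training_quantization_experimental(...)  # was mct.keras_post_training_quantization_experimental(...)
    # mct.qat.pytorch_quantization_aware_training_init(...)       # was mct.pytorch_quantization_aware_training_init(...)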
@@ -14,9 +14,8 @@
 # ==============================================================================
 from typing import Callable, List, Tuple
 
-from model_compression_toolkit.…
-from model_compression_toolkit.…
-from model_compression_toolkit.core.common.constants import PYTORCH
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GradientPTQConfigV2
 from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
@@ -34,12 +33,12 @@ from model_compression_toolkit.gptq.runner import gptq_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 from model_compression_toolkit.core.exporter import export_model
 from model_compression_toolkit.core.analyzer import analyzer_model_quantization
-from model_compression_toolkit.…
+from model_compression_toolkit.constants import FOUND_TORCH
 
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-    from model_compression_toolkit.…
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from torch.nn import Module
 
     from model_compression_toolkit import get_target_platform_capabilities
@@ -199,13 +198,13 @@ if FOUND_TORCH:
 Create a mixed-precision configuration, to quantize a model with different bitwidths for different layers.
 The candidates bitwidth for quantization should be defined in the target platform model:
 
->>> config = mct.MixedPrecisionQuantizationConfig()
+>>> config = mct.core.MixedPrecisionQuantizationConfig()
 
 Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
 that should be quantized (for example, the kernel of Conv2D in PyTorch will be affected by this value,
 while the bias will not):
 
->>> kpi = mct.KPI(sum(p.numel() for p in module.parameters()) * 0.75) # About 0.75 of the model size when quantized with 8 bits.
+>>> kpi = mct.core.KPI(sum(p.numel() for p in module.parameters()) * 0.75) # About 0.75 of the model size when quantized with 8 bits.
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model:
@@ -217,11 +216,11 @@
 """
 
 if not isinstance(quant_config, MixedPrecisionQuantizationConfig):
-    …
+    Logger.error("Given quantization config to mixed-precision facade is not of type "
                  "MixedPrecisionQuantizationConfig. Please use pytorch_post_training_quantization API, "
                  "or pass a valid mixed precision configuration.")
 
-…
+Logger.info("Using experimental mixed-precision quantization. "
            "If you encounter an issue please file a bug.")
 
 quantization_config, mp_config = quant_config.separate_configs()
@@ -12,3 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+
+from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization_experimental
+from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization_experimental
@@ -15,11 +15,10 @@
 
 from typing import Callable
 
-from model_compression_toolkit import CoreConfig
-from model_compression_toolkit.core import common
+from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core.analyzer import analyzer_model_quantization
-from model_compression_toolkit.…
-from model_compression_toolkit.…
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import TENSORFLOW, FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfigV2
@@ -33,7 +32,7 @@ if FOUND_TF:
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
 from tensorflow.keras.models import Model
-from model_compression_toolkit.…
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
 
 from model_compression_toolkit import get_target_platform_capabilities
@@ -93,25 +92,25 @@
 
 Create a MCT core config, containing the quantization configuration:
 
->>> config = mct.CoreConfig()
+>>> config = mct.core.CoreConfig()
 
 If mixed precision is desired, create a MCT core config with a mixed-precision configuration, to quantize a model with different bitwidths for different layers.
 The candidates bitwidth for quantization should be defined in the target platform model.
 In this example we use 1 image to search mixed-precision configuration:
 
->>> config = mct.CoreConfig(mixed_precision_config=mct.MixedPrecisionQuantizationConfigV2(num_of_images=1))
+>>> config = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfigV2(num_of_images=1))
 
 For mixed-precision set a target KPI object:
 Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
 that should be quantized (for example, the kernel of Conv2D in Keras will be affected by this value,
 while the bias will not):
 
->>> kpi = mct.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
+>>> kpi = mct.core.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model:
 
->>> quantized_model, quantization_info = mct.keras_post_training_quantization_experimental(model, repr_datagen, kpi, core_config=config)
+>>> quantized_model, quantization_info = mct.ptq.keras_post_training_quantization_experimental(model, repr_datagen, kpi, core_config=config)
 
 For more configuration options, please take a look at our `API documentation <https://sony.github.io/model_optimization/api/api_docs/modules/mixed_precision_quantization_config.html>`_.
 
@@ -124,11 +123,11 @@
 
 if core_config.mixed_precision_enable:
     if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
-        …
+        Logger.error("Given quantization config to mixed-precision facade is not of type "
                      "MixedPrecisionQuantizationConfigV2. Please use keras_post_training_quantization "
                      "API, or pass a valid mixed precision configuration.") # pragma: no cover
 
-    …
+    Logger.info("Using experimental mixed-precision quantization. "
                 "If you encounter an issue please file a bug.")
 
 tb_w = _init_tensorboard_writer(fw_info)
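Assembled from the updated `>>>` examples in this docstring, the end-to-end Keras PTQ call now reads as follows; a sketch only, where `model` and `repr_datagen` are placeholders for the user's Keras model and representative-dataset generator:

    import model_compression_toolkit as mct

    # Mixed-precision search driven by a single representative image, per the docstring.
    config = mct.core.CoreConfig(
        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfigV2(num_of_images=1))

    # Weight-memory target: about 0.75x of the 8-bit size (only quantized coefficients count).
    kpi = mct.core.KPI(model.count_params() * 0.75)

    quantized_model, quantization_info = mct.ptq.keras_post_training_quantization_experimental(
        model, repr_datagen, kpi, core_config=config)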
@@ -15,11 +15,11 @@
 from typing import Callable
 
 from model_compression_toolkit.core import common
-from model_compression_toolkit.…
-from model_compression_toolkit.…
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import PYTORCH, FOUND_TORCH
 from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
-from model_compression_toolkit import CoreConfig
+from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfigV2
 from model_compression_toolkit.core.runner import core_runner, _init_tensorboard_writer
@@ -31,7 +31,7 @@ from model_compression_toolkit.core.analyzer import analyzer_model_quantization
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-    from model_compression_toolkit.…
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from torch.nn import Module
     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
     from model_compression_toolkit import get_target_platform_capabilities
@@ -88,18 +88,18 @@
 Set number of clibration iterations to 1:
 
 >>> import model_compression_toolkit as mct
->>> quantized_module, quantization_info = mct.pytorch_post_training_quantization_experimental(module, repr_datagen)
+>>> quantized_module, quantization_info = mct.ptq.pytorch_post_training_quantization_experimental(module, repr_datagen)
 
 """
 
 if core_config.mixed_precision_enable:
     if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
-        …
+        Logger.error("Given quantization config to mixed-precision facade is not of type "
                      "MixedPrecisionQuantizationConfigV2. Please use "
                      "pytorch_post_training_quantization API, or pass a valid mixed precision "
                      "configuration.") # pragma: no cover
 
-    …
+    Logger.info("Using experimental mixed-precision quantization. "
                 "If you encounter an issue please file a bug.")
 
 tb_w = _init_tensorboard_writer(DEFAULT_PYTORCH_INFO)
@@ -12,3 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+from model_compression_toolkit.qat.common.qat_config import QATConfig, TrainingMethod
+
+from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, keras_quantization_aware_training_finalize
+from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, pytorch_quantization_aware_training_finalize
@@ -12,5 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-
-from model_compression_toolkit.qat.common.constants import THRESHOLD_TENSOR, WEIGHTS_QUANTIZATION_PARAMS
+from model_compression_toolkit.quantizers_infrastructure.constants import THRESHOLD_TENSOR, WEIGHTS_QUANTIZATION_PARAMS
@@ -17,6 +17,8 @@ from typing import Dict
 from enum import Enum
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
+from model_compression_toolkit.logger import Logger
+
 
 def _is_qat_applicable(node: common.BaseNode,
                        fw_info: FrameworkInfo) -> bool:
@@ -31,7 +33,7 @@ def _is_qat_applicable(node: common.BaseNode,
     """
 
     if node.is_weights_quantization_enabled() and not fw_info.is_kernel_op(node.type):
-        …
+        Logger.error("QAT Error: Quantizing a node without a kernel isn't supported")
     return node.is_weights_quantization_enabled() or node.is_activation_quantization_enabled()
 
 
@@ -40,8 +42,10 @@ class TrainingMethod(Enum):
     An enum for selecting a QAT training method
 
     STE - Standard straight-through estimator. Includes PowerOfTwo, symmetric & uniform quantizers
+    DQA - DNN Quantization with Attention. Includes a smooth quantization introduces by DQA method
     """
     STE = "STE",
+    DQA = "DQA"
 
 
 class QATConfig:
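The enum gains a DQA entry alongside STE. The hunk shows only the enum itself, not `QATConfig`'s constructor, so the keyword names in this sketch are assumptions for illustration:

    from model_compression_toolkit.qat import QATConfig, TrainingMethod

    # Hypothetical usage: assumes QATConfig accepts per-target training-method arguments.
    qat_config = QATConfig(weight_training_method=TrainingMethod.DQA,
                           activation_training_method=TrainingMethod.STE)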
@@ -16,14 +16,13 @@
 from typing import Callable
 from functools import partial
 
-from model_compression_toolkit import CoreConfig
-from model_compression_toolkit.…
-from model_compression_toolkit.…
-from model_compression_toolkit.core.common.constants import TENSORFLOW, FOUND_TF
-from model_compression_toolkit.core.common.framework_info import FrameworkInfo
+from model_compression_toolkit.core import CoreConfig
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfigV2
+from model_compression_toolkit.quantizers_infrastructure import ActivationQuantizationHolder
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
 from model_compression_toolkit.core.runner import core_runner, _init_tensorboard_writer
 from model_compression_toolkit.ptq.runner import ptq_runner
@@ -36,7 +35,7 @@ if FOUND_TF:
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
-from model_compression_toolkit.…
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
 from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
 
@@ -46,33 +45,36 @@
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common import BaseNode
-from model_compression_toolkit.…
+from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.qat.common.qat_config import _is_qat_applicable
-from model_compression_toolkit.…
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
-from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder
+from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder, \
+    get_activation_quantizer_holder
 from model_compression_toolkit.qat.common.qat_config import QATConfig
 from model_compression_toolkit import quantizers_infrastructure as qi
 
 DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
 
-def qat_wrapper(n: common.BaseNode,…
+def qat_wrapper(n: common.BaseNode,
+                layer: Layer,
+                qat_config: QATConfig):
     """
     A function which takes a computational graph node and a keras layer and perform the quantization wrapping
     Args:
+        qat_config: Configuration of QAT (such as training methods for example).
         n: A node of mct graph.
-        layer: A keras layer
+        layer: A keras layer.
 
     Returns: Wrapped layer
 
     """
     if _is_qat_applicable(n, DEFAULT_KERAS_INFO):
         weights_quantizers, activation_quantizers = quantization_builder(n, qat_config, DEFAULT_KERAS_INFO)
-        return qi.KerasQuantizationWrapper(layer, weights_quantizers…
-        …
-        return layer
+        return qi.KerasQuantizationWrapper(layer, weights_quantizers)
+    return layer
 
 
 def keras_quantization_aware_training_init(in_model: Model,
@@ -134,24 +136,24 @@ if FOUND_TF:
 
 Create a MCT core config, containing the quantization configuration:
 
->>> config = mct.CoreConfig()
+>>> config = mct.core.CoreConfig()
 
 If mixed precision is desired, create a MCT core config with a mixed-precision configuration, to quantize a model with different bitwidths for different layers.
 The candidates bitwidth for quantization should be defined in the target platform model:
 
->>> config = mct.CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfigV2())
+>>> config = mct.core.CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfigV2())
 
 For mixed-precision set a target KPI object:
 Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
 that should be quantized (for example, the kernel of Conv2D in Keras will be affected by this value,
 while the bias will not):
 
->>> kpi = mct.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
+>>> kpi = mct.core.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model:
 
->>> quantized_model, quantization_info, custom_objects = mct.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
+>>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
 
 Use the quantized model for fine-tuning. For loading the model from file, use the custom_objects dictionary:
 
@@ -165,11 +167,11 @@ if FOUND_TF:
 
 if core_config.mixed_precision_enable:
     if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
-        …
+        Logger.error("Given quantization config to mixed-precision facade is not of type "
                      "MixedPrecisionQuantizationConfigV2. Please use keras_post_training_quantization API,"
                      "or pass a valid mixed precision configuration.")
 
-    …
+    Logger.info("Using experimental mixed-precision quantization. "
                 "If you encounter an issue please file a bug.")
 
 tb_w = _init_tensorboard_writer(fw_info)
@@ -188,7 +190,11 @@ if FOUND_TF:
 tg = ptq_runner(tg, representative_data_gen, core_config, fw_info, fw_impl, tb_w)
 
 _qat_wrapper = partial(qat_wrapper, qat_config=qat_config)
-qat_model, user_info = KerasModelBuilder(graph=tg,…
+qat_model, user_info = KerasModelBuilder(graph=tg,
+                                         fw_info=fw_info,
+                                         wrapper=_qat_wrapper,
+                                         get_activation_quantizer_holder_fn=partial(get_activation_quantizer_holder,
+                                                                                    qat_config=qat_config)).build_model()
 
 user_info.mixed_precision_cfg = bit_widths_config
 #TODO: remove the last output after updating documentation.
@@ -223,33 +229,33 @@ if FOUND_TF:
 
 Create a MCT core config, containing the quantization configuration:
 
->>> config = mct.CoreConfig()
+>>> config = mct.core.CoreConfig()
 
 If mixed precision is desired, create a MCT core config with a mixed-precision configuration, to quantize a model with different bitwidths for different layers.
 The candidates bitwidth for quantization should be defined in the target platform model:
 
->>> config = mct.CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfigV2())
+>>> config = mct.core.CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfigV2())
 
 For mixed-precision set a target KPI object:
 Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
 that should be quantized (for example, the kernel of Conv2D in Keras will be affected by this value,
 while the bias will not):
 
->>> kpi = mct.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
+>>> kpi = mct.core.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model:
 
->>> quantized_model, quantization_info, custom_objects = mct.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
+>>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
 
 Use the quantized model for fine-tuning. For loading the model from file, use the custom_objects dictionary:
 
 >>> quantized_model = tf.keras.models.load_model(model_file, custom_objects=custom_objects)
->>> quantized_model = mct.keras_quantization_aware_training_finalize(quantized_model)
+>>> quantized_model = mct.qat.keras_quantization_aware_training_finalize(quantized_model)
 
 """
 def _export(layer):
-    if isinstance(layer, qi.KerasQuantizationWrapper):
+    if isinstance(layer, (qi.KerasQuantizationWrapper, ActivationQuantizationHolder)):
         layer.convert_to_inferable_quantizers()
     return layer
 
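With activation quantizers now living in standalone `ActivationQuantizationHolder` layers rather than inside `KerasQuantizationWrapper`, the finalize step has to convert both layer types. A sketch of how the `_export` function above would typically be applied; the `clone_model` call is an assumption about the surrounding code, as only `_export` itself appears in the hunk:

    import tensorflow as tf

    # Assumed application of _export: clone the trained QAT model, switching each
    # quantization wrapper/holder to its inferable (inference-only) quantizers.
    inference_model = tf.keras.models.clone_model(qat_model, clone_function=_export)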
@@ -14,8 +14,8 @@
 # ==============================================================================
 from typing import Union
 
-from model_compression_toolkit.…
-from model_compression_toolkit.…
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import FOUND_TF
 
 from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
     TrainableQuantizerActivationConfig, BaseKerasTrainableQuantizer
@@ -12,20 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-from typing import Tuple, Dict, List
+from typing import Tuple, Dict, List, Union, Callable
 
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
+from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.qat.common.qat_config import QATConfig, _is_qat_applicable
+from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
+from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget, ActivationQuantizationHolder
 from model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.common.get_quantizer_config import \
     get_trainable_quantizer_weights_config, get_trainable_quantizer_activation_config, \
     get_trainable_quantizer_quantization_candidates
-from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
-from model_compression_toolkit.qat.common.qat_config import QATConfig
-from model_compression_toolkit.quantizers_infrastructure import QuantizationTarget
 from model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.common.get_quantizers import \
     get_trainable_quantizer_class
 
 
+def get_activation_quantizer_holder(n: common.BaseNode,
+                                    qat_config: QATConfig) -> Union[None, Callable]:
+    """
+    Retrieve a ActivationQuantizationHolder layer to use for activation quantization for a node.
+    If the layer is not supposed to be wrapped with activation quantizers - return None.
+
+    Args:
+        n: Node to get ActivationQuantizationHolder to attach in its output.
+        qat_config: Configuration of QAT (such as training methods for example).
+
+    Returns:
+        A ActivationQuantizationHolder layer for the node activation quantization.
+    """
+    _, activation_quantizers = quantization_builder(n,
+                                                    qat_config,
+                                                    DEFAULT_KERAS_INFO)
+
+    # Holder by definition uses a single quantizer for the activation quantization
+    # thus we make sure this is the only possible case (unless it's a node with no activation
+    # quantization, which in this case has an empty list).
+    if len(activation_quantizers) == 1:
+        return ActivationQuantizationHolder(activation_quantizers[0])
+    Logger.error(f'ActivationQuantizationHolder supports a single quantizer but {len(activation_quantizers)} quantizers were found for node {n}')
+
+
 def quantization_builder(n: common.BaseNode,
                          qat_config: QATConfig,
                          fw_info: FrameworkInfo,
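`get_activation_quantizer_holder` enforces a one-quantizer-per-holder invariant. For intuition, a toy sketch of the holder concept introduced by the new `keras/activation_quantization_holder.py` (+147 lines in the file list above); this is an illustration of the idea, not the toolkit's actual implementation:

    import tensorflow as tf

    class ToyActivationQuantizationHolder(tf.keras.layers.Layer):
        """Toy stand-in: a layer that owns exactly one activation quantizer."""

        def __init__(self, activation_quantizer, **kwargs):
            super().__init__(**kwargs)
            self.activation_quantizer = activation_quantizer

        def call(self, inputs, training=None):
            # Quantize the incoming activation with the single held quantizer,
            # mirroring the invariant checked in get_activation_quantizer_holder.
            return self.activation_quantizer(inputs, training)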
@@ -18,13 +18,15 @@ from typing import Union
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework.tensor_shape import TensorShape
-from model_compression_toolkit.…
+from model_compression_toolkit.constants import SIGNED
+from model_compression_toolkit.quantizers_infrastructure.constants import FQ_MIN, FQ_MAX
+
+from model_compression_toolkit.qat import TrainingMethod
 
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
-from model_compression_toolkit…
-
-from model_compression_toolkit.core.common import constants as C
+from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
+
 from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
 from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
     TrainableQuantizerActivationConfig
@@ -53,11 +55,11 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
         """
         super().__init__(quantization_config)
         self.power_of_two = quantization_config.weights_quantization_method == QuantizationMethod.POWER_OF_TWO
-        self.threshold_values = quantization_config.weights_quantization_params[C.THRESHOLD]
+        self.threshold_values = np.array(quantization_config.weights_quantization_params[C.THRESHOLD])
         self.threshold_shape = np.asarray(self.threshold_values).shape
         self.per_channel = self.quantization_config.weights_per_channel_threshold
         self.channel_axis = self.quantization_config.weights_channels_axis
-        self.np_threshold_values = np.reshape(np.asarray(self.threshold_values),[-1]) if self.…
+        self.np_threshold_values = np.reshape(np.asarray(self.threshold_values),[-1]) if self.per_channel else float(self.threshold_values)
 
         if self.per_channel and self.channel_axis not in [-1, len(self.threshold_shape) - 1]:
             # Tensorflow's fake_quant_with_min_max_vars_per_channel only works on last axis, so
@@ -93,21 +95,21 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
         """
         ptq_threshold_tensor = layer.add_weight(
             name + THRESHOLD_TENSOR,
-            shape=len(self.np_threshold_values) if self.…
+            shape=len(self.np_threshold_values) if self.per_channel else (),
             initializer=tf.keras.initializers.Constant(1.0),
             trainable=False)
         ptq_threshold_tensor.assign(self.np_threshold_values)
 
         fq_min = layer.add_weight(
             name + FQ_MIN,
-            shape=len(self.min) if self.…
+            shape=len(self.min) if self.per_channel else (),
             initializer=tf.keras.initializers.Constant(-1.0),
             trainable=False)
         fq_min.assign(self.min)
 
         fq_max = layer.add_weight(
             name + FQ_MAX,
-            shape=len(self.max) if self.…
+            shape=len(self.max) if self.per_channel else (),
             initializer=tf.keras.initializers.Constant(1.0),
             trainable=False)
         fq_max.assign(self.max)
@@ -134,7 +136,7 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
 
         _min = self.get_quantizer_variable(FQ_MIN)
         _max = self.get_quantizer_variable(FQ_MAX)
-        if self.…
+        if self.per_channel:
             if self.perm_vec:
                 inputs = tf.transpose(inputs, perm=self.perm_vec)
             q_tensor = tf.quantization.fake_quant_with_min_max_vars_per_channel(inputs, _min, _max,
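The `np.array(...)` wrapping added above fixes the per-tensor path: `weights_quantization_params[C.THRESHOLD]` may arrive as a plain Python float, which breaks shape-dependent code, while per-channel thresholds arrive as arrays. A standalone demonstration of the pattern (not MCT code):

    import numpy as np

    def normalize_threshold(values, per_channel):
        # Mirrors the quantizer: flatten for per-channel, collapse to float for per-tensor.
        values = np.array(values)  # the fix: uniform handling of float, list, or ndarray
        return np.reshape(values, [-1]) if per_channel else float(values)

    print(normalize_threshold(2.0, per_channel=False))            # 2.0
    print(normalize_threshold([[1.0], [4.0]], per_channel=True))  # [1. 4.]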
@@ -15,13 +15,15 @@
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework.tensor_shape import TensorShape
-from model_compression_toolkit.…
+from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
+from model_compression_toolkit.quantizers_infrastructure.constants import FQ_MIN, FQ_MAX
+from model_compression_toolkit.qat import TrainingMethod
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-
+
 from model_compression_toolkit.qat.keras.quantizer.quant_utils import adjust_range_to_include_zero
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero
-from model_compression_toolkit import quantizers_infrastructure as qi,…
-
+from model_compression_toolkit import quantizers_infrastructure as qi, constants as C
+
 from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
 from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
     TrainableQuantizerActivationConfig
@@ -50,8 +52,8 @@ class STEUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
 
         """
         super().__init__(quantization_config)
-        self.max_values = quantization_config.weights_quantization_params[RANGE_MAX]
-        self.min_values = quantization_config.weights_quantization_params[RANGE_MIN]
+        self.max_values = np.array(quantization_config.weights_quantization_params[RANGE_MAX])
+        self.min_values = np.array(quantization_config.weights_quantization_params[RANGE_MIN])
         self.num_bits = self.quantization_config.weights_n_bits
         self.per_channel = self.quantization_config.weights_per_channel_threshold
         self.channel_axis = self.quantization_config.weights_channels_axis
@@ -98,7 +100,6 @@ class STEUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
         self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
         self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
 
-
     def __call__(self, inputs: tf.Tensor,
                  training: bool):
         """
@@ -199,7 +200,6 @@ class STEUniformActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
         self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
         self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
 
-
     def __call__(self,
                  inputs: tf.Tensor,
                  training: bool):
@@ -16,11 +16,11 @@ import copy
 from typing import Callable
 from functools import partial
 
-from model_compression_toolkit.…
+from model_compression_toolkit.constants import FOUND_TORCH, PYTORCH
 
-from model_compression_toolkit import CoreConfig
+from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core import common
-from model_compression_toolkit.…
+from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -34,7 +34,7 @@ if FOUND_TORCH:
 import torch.nn as nn
 from torch.nn import Module
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
-from model_compression_toolkit.…
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
 from model_compression_toolkit.qat.common.qat_config import _is_qat_applicable
 from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
@@ -121,7 +121,7 @@ if FOUND_TORCH:
 
 Create a MCT core config, containing the quantization configuration:
 
->>> config = mct.CoreConfig()
+>>> config = mct.core.CoreConfig()
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model. Now the model contains quantizer wrappers for fine tunning the weights:
@@ -134,11 +134,11 @@ if FOUND_TORCH:
 
 if core_config.mixed_precision_enable:
     if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
-        …
+        Logger.error("Given quantization config to mixed-precision facade is not of type "
                      "MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,"
                      "or pass a valid mixed precision configuration.")
 
-    …
+    Logger.info("Using experimental mixed-precision quantization. "
                 "If you encounter an issue please file a bug.")
 
 tb_w = _init_tensorboard_writer(fw_info)
@@ -193,7 +193,7 @@ if FOUND_TORCH:
 
 Create a MCT core config, containing the quantization configuration:
 
->>> config = mct.CoreConfig()
+>>> config = mct.core.CoreConfig()
 
 Pass the model, the representative dataset generator, the configuration and the target KPI to get a
 quantized model:
@@ -14,8 +14,8 @@
 # ==============================================================================
 from typing import Union
 
-from model_compression_toolkit.…
-from model_compression_toolkit.…
+from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.constants import FOUND_TORCH
 
 from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig, \
     TrainableQuantizerActivationConfig