mct-nightly 2.1.0.20240731.414.tar.gz → 2.1.0.20240802.429.tar.gz
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/PKG-INFO +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/SOURCES.txt +4 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/constants.py +2 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/framework_implementation.py +5 -9
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/base_graph.py +1 -23
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/base_node.py +52 -33
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +6 -6
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +12 -12
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +2 -2
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +17 -38
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +6 -4
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +19 -12
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +14 -14
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +14 -9
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +5 -27
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +25 -17
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +10 -6
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +1 -65
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +12 -5
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +7 -5
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +3 -3
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/keras_implementation.py +21 -17
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +2 -2
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +3 -3
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +15 -14
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/reader/node_holders.py +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/runner.py +1 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +2 -2
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/gptq_training.py +0 -35
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +1 -1
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +32 -8
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +2 -2
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +5 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +2 -0
- mct-nightly-2.1.0.20240802.429/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +16 -0
- mct-nightly-2.1.0.20240802.429/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +235 -0
- mct-nightly-2.1.0.20240802.429/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +132 -0
- mct-nightly-2.1.0.20240802.429/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +112 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +2 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/LICENSE.md +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/README.md +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/setup.cfg +0 -0
- {mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/setup.py +0 -0
{mct-nightly-2.1.0.20240731.414 → mct-nightly-2.1.0.20240802.429}/mct_nightly.egg-info/SOURCES.txt
RENAMED
@@ -474,6 +474,10 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py
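The four new imx500_tpc/v4 sources add another IMX500 target-platform-capabilities version next to v1-v3. As a rough illustration only (the framework, platform, and version strings below are assumptions, not taken from this diff), such a version is normally picked up through the package's get_target_platform_capabilities entry point:

    import model_compression_toolkit as mct

    # Hypothetical selection of the newly added IMX500 TPC version (argument values are assumptions).
    tpc = mct.get_target_platform_capabilities('tensorflow', 'imx500', 'v4')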
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

-__version__ = "2.1.0.
+__version__ = "2.1.0.20240802.000429"
@@ -69,7 +69,8 @@ FLOAT_BITWIDTH = 32
 # that are shared among different candidates:
 WEIGHTS_NBITS_ATTRIBUTE = 'weights_n_bits'
 CORRECTED_BIAS_ATTRIBUTE = 'corrected_bias'
-
+ACTIVATION_N_BITS_ATTRIBUTE = 'activation_n_bits'
+SUPPORTED_INPUT_ACTIVATION_NBITS_ATTRIBUTE = 'supported_input_activation_n_bits'

 # Quantization Parameters Iterative Search Defaults:
 SYMMETRIC_TENSOR_N_ITER = 40
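The two new attribute-name constants are dictionary keys used when candidate quantization configurations are grouped and compared. A minimal standalone sketch of that idea (the candidate dictionaries here are made up for illustration):

    ACTIVATION_N_BITS_ATTRIBUTE = 'activation_n_bits'

    # Made-up candidate configs that differ only in activation bit-width.
    candidates = [{'activation_n_bits': 8}, {'activation_n_bits': 4}, {'activation_n_bits': 2}]
    shared_attributes = [ACTIVATION_N_BITS_ATTRIBUTE]
    unified = {attr: [c[attr] for c in candidates] for attr in shared_attributes}
    print(unified)  # {'activation_n_bits': [8, 4, 2]}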
@@ -348,24 +348,20 @@ class FrameworkImplementation(ABC):
         raise NotImplemented(f'{self.__class__.__name__} have to implement the '
                              f'framework\'s count_node_for_mixed_precision_interest_points method.')  # pragma: no cover

-    def get_mp_node_distance_fn(self,
-
-
-                                axis: int = None,
-                                norm_mse: bool = False) -> Callable:
+    def get_mp_node_distance_fn(self, n: BaseNode,
+                                compute_distance_fn: Callable = None,
+                                norm_mse: bool = False) -> Tuple[Callable, int]:
         """
         A mapping between layers' types and a distance function for computing the distance between
         two tensors in mixed precision (for loss computation purposes). Returns a specific function if node of specific types is
         given, or a default (normalized MSE) function otherwise.

         Args:
-
-            framework_attrs: Framework attributes the layer had which the graph node holds.
+            n: Node to choose distance function for.
             compute_distance_fn: An optional distance function to use globally for all nodes.
-            axis: The axis on which the operation is preformed (if specified).
             norm_mse: whether to normalize mse distance function.

-        Returns: A distance function between two tensors.
+        Returns: A distance function between two tensors and a axis on which the distance is computed (if exists).
         """

         raise NotImplemented(f'{self.__class__.__name__} have to implement the '
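The reworked signature returns the distance function together with the axis it should be applied on, so callers make a single call per node. A sketch of the updated call pattern (it mirrors the sensitivity-evaluation change later in this diff; fw_impl, node, and the two tensors are placeholders):

    # Placeholder objects: a concrete FrameworkImplementation and a graph node.
    distance_fn, axis = fw_impl.get_mp_node_distance_fn(node,
                                                        compute_distance_fn=None,
                                                        norm_mse=True)
    # axis is only consumed by axis-aware distances (e.g. KL divergence); others ignore it.
    error = distance_fn(float_tensor, quantized_tensor)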
@@ -440,7 +440,7 @@ class Graph(nx.MultiDiGraph, GraphSearches):

         output_nodes = [ot.node for ot in self.get_outputs()]  # get output nodes from namedtuples
         if node_to_remove in output_nodes:  # If node is in the graph's outputs, the outputs should be updated
-            if new_graph_outputs is None:
+            if new_graph_outputs is None:  # pragma: no cover
                 Logger.critical(
                     f"{node_to_remove.name} is among the graph outputs; however, it cannot be removed without providing a new output.")  # pragma: no cover
             self.set_outputs(new_graph_outputs)
@@ -506,28 +506,6 @@ class Graph(nx.MultiDiGraph, GraphSearches):
         output_edges.sort(key=lambda e: getattr(e, sort_by_attr))
         return output_edges

-    def get_memory(self) -> float:
-        """
-
-        Returns: Total memory consumption of the graph in bytes.
-
-        """
-        memory = 0
-        for n in self.nodes:
-            memory += n.get_memory_bytes(self.fw_info)
-        return memory
-
-    def get_float_memory(self) -> float:
-        """
-
-        Returns: Total memory consumption of the float graph in bytes.
-
-        """
-        memory = 0
-        for n in self.nodes:
-            memory += n.get_float_memory_bytes(self.fw_info)
-        return memory
-
     def get_configurable_sorted_nodes_names(self,
                                             fw_info: FrameworkInfo,
                                             include_reused_nodes: bool = False) -> List[str]:
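Graph.get_memory() and Graph.get_float_memory() are dropped in this release. Code that still needs those totals can reproduce them from the per-node helpers the removed bodies relied on, assuming those node methods remain available (a sketch based only on the removed code above, not package code):

    # Sum per-node byte counts the way the removed Graph helpers did.
    def graph_memory_bytes(graph, fw_info):
        return sum(n.get_memory_bytes(fw_info) for n in graph.nodes)

    def graph_float_memory_bytes(graph, fw_info, fp32_bytes_per_parameter=4):
        total = 0
        for n in graph.nodes:
            # get_num_parameters returns (quantized, float) parameter counts in the removed node helper.
            q_params, f_params = n.get_num_parameters(fw_info)
            total += (f_params + q_params) * fp32_bytes_per_parameter
        return total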
@@ -19,11 +19,11 @@ from typing import Dict, Any, Tuple, List, Type, Union
 import numpy as np

 from model_compression_toolkit.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE, \
-
+    ACTIVATION_N_BITS_ATTRIBUTE, FP32_BYTES_PER_PARAMETER
 from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationConfigOptions, \
-    TargetPlatformCapabilities, LayerFilterParams
+    TargetPlatformCapabilities, LayerFilterParams, OpQuantizationConfig


 class BaseNode:
@@ -297,19 +297,6 @@ class BaseNode:

         return memory

-    def get_float_memory_bytes(self, fw_info) -> float:
-        """
-        Compute the number of bytes the node's memory requires.
-
-        Args:
-            fw_info: Framework info to decide which attributes should be quantized.
-
-        Returns: Number of bytes the node's memory requires when in floating point (32 bit).
-
-        """
-        q_params, f_params = self.get_num_parameters(fw_info)
-        return (f_params + q_params) * FP32_BYTES_PER_PARAMETER
-
     def get_unified_weights_candidates_dict(self, fw_info) -> Dict[str, Any]:
         """
         In Mixed-Precision, a node's kernel can have multiple candidates for weights quantization configuration.
@@ -355,7 +342,7 @@ class BaseNode:
         Returns: A dictionary containing information from node's activation quantization configuration candidates.

         """
-        shared_attributes = [
+        shared_attributes = [ACTIVATION_N_BITS_ATTRIBUTE]
         attr = dict()
         if self.is_activation_quantization_enabled():
             attr = copy.deepcopy(self.candidates_quantization_cfg[0].activation_quantization_cfg.__dict__)
@@ -436,20 +423,6 @@ class BaseNode:

         return sum([np.prod([x for x in output_shape if x is not None]) for output_shape in output_shapes])

-    def get_total_input_params(self) -> float:
-        """
-        Calculates the total parameters in the node's input tensors.
-
-        Returns: Input size (i.e., total number of parameters).
-        """
-
-        input_shapes = self.input_shape if isinstance(self.input_shape, List) else [self.input_shape]
-
-        # remove batch size (first element) from input shape
-        input_shapes = [s[1:] for s in input_shapes]
-
-        return sum([np.prod([x for x in input_shape if x is not None]) for input_shape in input_shapes])
-
     def find_min_candidates_indices(self) -> List[int]:
         """
         Returns a list with potential minimal candidates.
@@ -565,7 +538,7 @@ class BaseNode:
         to the mappings from layers/LayerFilterParams to the OperatorsSet in the TargetPlatformModel.

         Args:
-            tpc: TPC to extract the QuantizationConfigOptions for the node
+            tpc: TPC to extract the QuantizationConfigOptions for the node.

         Returns:
             QuantizationConfigOptions of the node.
@@ -585,6 +558,52 @@ class BaseNode:
             return matching_qcos[0]
         return tpc.tp_model.default_qco

+    def filter_node_qco_by_graph(self, tpc: TargetPlatformCapabilities,
+                                 next_nodes: List, node_qc_options: QuantizationConfigOptions
+                                 ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
+        """
+        Filter quantization config options that don't match the graph.
+        A node may have several quantization config options with 'activation_n_bits' values, and
+        the next nodes in the graph may support different bit-width as input activation. This function
+        filters out quantization config that don't comply to these attributes.
+
+        Args:
+            tpc: TPC to extract the QuantizationConfigOptions for the next nodes.
+            next_nodes: Output nodes of current node.
+            node_qc_options: Node's QuantizationConfigOptions.
+
+        Returns:
+
+        """
+        # Filter quantization config options that don't match the graph.
+        _base_config = node_qc_options.base_config
+        _node_qc_options = node_qc_options.quantization_config_list
+        if len(next_nodes):
+            next_nodes_qc_options = [_node.get_qco(tpc) for _node in next_nodes]
+            next_nodes_supported_input_bitwidth = min([op_cfg.max_input_activation_n_bits
+                                                       for qc_opts in next_nodes_qc_options
+                                                       for op_cfg in qc_opts.quantization_config_list])
+
+            # Filter node's QC options that match next nodes input bit-width.
+            _node_qc_options = [_option for _option in _node_qc_options
+                                if _option.activation_n_bits <= next_nodes_supported_input_bitwidth]
+            if len(_node_qc_options) == 0:
+                Logger.critical(f"Graph doesn't match TPC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
+
+            # Verify base config match
+            if any([node_qc_options.base_config.activation_n_bits > qc_opt.base_config.max_input_activation_n_bits
+                    for qc_opt in next_nodes_qc_options]):
+                # base_config activation bits doesn't match next node supported input bit-width -> replace with
+                # a qco from quantization_config_list with maximum activation bit-width.
+                if len(_node_qc_options) > 0:
+                    output_act_bitwidth = {qco.activation_n_bits: i for i, qco in enumerate(_node_qc_options)}
+                    _base_config = _node_qc_options[output_act_bitwidth[max(output_act_bitwidth)]]
+                    Logger.warning(f"Node {self} base quantization config changed to match Graph and TPC configuration.\nCause: {self} -> {next_nodes}.")
+                else:
+                    Logger.critical(f"Graph doesn't match TPC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
+
+        return _base_config, _node_qc_options
+
     def is_match_type(self, _type: Type) -> bool:
         """
         Check if input type matches the node type, either in instance type or in type name.
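A rough sketch of how the new filter_node_qco_by_graph helper is driven (the traversal call used to collect next_nodes is an assumption for illustration; get_qco appears in the method body above):

    # Hypothetical caller: narrow a node's candidate configs to bit-widths its consumers accept.
    next_nodes = graph.get_next_nodes(node)   # assumed graph helper returning the node's output nodes
    qc_options = node.get_qco(tpc)
    base_cfg, valid_cfgs = node.filter_node_qco_by_graph(tpc, next_nodes, qc_options)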
@@ -644,10 +663,10 @@ class BaseNode:
         if len(simd_list) > 1:
             Logger.warning(f"More than one pruning SIMD option is available."
                            f" Min SIMD is used: {min(simd_list)}")
-        if len(simd_list) == 0:
+        if len(simd_list) == 0:  # pragma: no cover
             Logger.critical(f"No SIMD option is available for {self}")
         _simd = min(simd_list)
-        if _simd <= 0 or int(_simd) != _simd:
+        if _simd <= 0 or int(_simd) != _simd:  # pragma: no cover
             Logger.critical(f"SIMD is expected to be a non-positive integer but found: {_simd}")
         return _simd

@@ -195,12 +195,12 @@ def compute_total_bops(graph: Graph, fw_info: FrameworkInfo, fw_impl: FrameworkI


 def requires_mixed_precision(in_model: Any,
-
-
-
-
-
-
+                             target_resource_utilization: ResourceUtilization,
+                             representative_data_gen: Callable,
+                             core_config: CoreConfig,
+                             tpc: TargetPlatformCapabilities,
+                             fw_info: FrameworkInfo,
+                             fw_impl: FrameworkImplementation) -> bool:
     """
     The function checks whether the model requires mixed precision to meet the requested target resource utilization.
     This is determined by whether the target memory usage of the weights is less than the available memory,
@@ -77,7 +77,8 @@ class SensitivityEvaluation:
         self.disable_activation_for_metric = disable_activation_for_metric
         if self.quant_config.use_hessian_based_scores:
             if not isinstance(hessian_info_service, HessianInfoService):
-                Logger.critical(
+                Logger.critical(
+                    f"When using Hessian-based approximations for sensitivity evaluation, a valid HessianInfoService object is required; found {type(hessian_info_service)}.")
             self.hessian_info_service = hessian_info_service

         self.sorted_configurable_nodes_names = graph.get_configurable_sorted_nodes_names(self.fw_info)
@@ -94,7 +95,8 @@ class SensitivityEvaluation:
         self.ips_distance_fns, self.ips_axis = self._init_metric_points_lists(self.interest_points, use_normalized_mse)

         self.output_points = get_output_nodes_for_metric(graph)
-        self.out_ps_distance_fns, self.out_ps_axis = self._init_metric_points_lists(self.output_points,
+        self.out_ps_distance_fns, self.out_ps_axis = self._init_metric_points_lists(self.output_points,
+                                                                                    use_normalized_mse)

         # Setting lists with relative position of the interest points
         # and output points in the list of all mp model activation tensors
@@ -130,7 +132,8 @@ class SensitivityEvaluation:
             self.interest_points_hessians = self._compute_hessian_based_scores()
             self.quant_config.distance_weighting_method = lambda d: self.interest_points_hessians

-    def _init_metric_points_lists(self, points: List[BaseNode], norm_mse: bool = False) -> Tuple[
+    def _init_metric_points_lists(self, points: List[BaseNode], norm_mse: bool = False) -> Tuple[
+            List[Callable], List[int]]:
         """
         Initiates required lists for future use when computing the sensitivity metric.
         Each point on which the metric is computed uses a dedicated distance function based on its type.
@@ -146,16 +149,12 @@ class SensitivityEvaluation:
         distance_fns_list = []
         axis_list = []
         for n in points:
-            axis =
-
-
-                framework_attrs=n.framework_attr,
-                compute_distance_fn=self.quant_config.compute_distance_fn,
-                axis=axis,
-                norm_mse=norm_mse)
+            distance_fn, axis = self.fw_impl.get_mp_node_distance_fn(n,
+                                                                     compute_distance_fn=self.quant_config.compute_distance_fn,
+                                                                     norm_mse=norm_mse)
             distance_fns_list.append(distance_fn)
             # Axis is needed only for KL Divergence calculation, otherwise we use per-tensor computation
-            axis_list.append(axis if distance_fn==compute_kl_divergence else None)
+            axis_list.append(axis if distance_fn == compute_kl_divergence else None)
         return distance_fns_list, axis_list

     def compute_metric(self,
@@ -300,7 +299,8 @@ class SensitivityEvaluation:
         node_name = sorted_configurable_nodes_names[node_idx_to_configure]
         layers_to_config = self.conf_node2layers.get(node_name, None)
         if layers_to_config is None:
-            Logger.critical(
+            Logger.critical(
+                f"Matching layers for node {node_name} not found in the mixed precision model configuration.")  # pragma: no cover

         for current_layer in layers_to_config:
             self.set_layer_to_bitwidth(current_layer, mp_model_configuration[node_idx_to_configure])
@@ -58,7 +58,7 @@ class CandidateNodeQuantizationConfig(BaseNodeQuantizationConfig):
         if activation_quantization_cfg is not None:
             self.activation_quantization_cfg = activation_quantization_cfg
         else:
-            if any(v is None for v in (qc, op_cfg, activation_quantization_fn, activation_quantization_params_fn)):
+            if any(v is None for v in (qc, op_cfg, activation_quantization_fn, activation_quantization_params_fn)):  # pragma: no cover
                 Logger.critical(
                     "Missing required arguments to initialize a node activation quantization configuration. "
                     "Ensure QuantizationConfig, OpQuantizationConfig, activation quantization function, "
@@ -72,7 +72,7 @@ class CandidateNodeQuantizationConfig(BaseNodeQuantizationConfig):
         if weights_quantization_cfg is not None:
             self.weights_quantization_cfg = weights_quantization_cfg
         else:
-            if any(v is None for v in (qc, op_cfg, node_attrs_list)):
+            if any(v is None for v in (qc, op_cfg, node_attrs_list)):  # pragma: no cover
                 Logger.critical("Missing required arguments to initialize a node weights quantization configuration. "
                                 "Ensure QuantizationConfig, OpQuantizationConfig, weights quantization function, "
                                 "parameters function, and weights attribute quantization config are provided.")
@@ -96,6 +96,7 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
         self.activation_n_bits = op_cfg.activation_n_bits
         self.relu_bound_to_power_of_2 = qc.relu_bound_to_power_of_2
         self.enable_activation_quantization = op_cfg.enable_activation_quantization
+        self.is_signed = op_cfg.is_signed
         self.activation_channel_equalization = qc.activation_channel_equalization
         self.input_scaling = qc.input_scaling
         self.min_threshold = qc.min_threshold
@@ -178,20 +179,6 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
         for param_name, param_value in activation_params.items():
             self.activation_quantization_params[param_name] = param_value

-    def has_activation_quantization_params(self) -> bool:
-        """
-
-        Returns: Whether NodeQuantizationConfig has a activation quantization params or not.
-
-        """
-        return len(self.activation_quantization_params) > 0
-
-    def no_quantization(self) -> bool:
-        """
-        Returns: Whether NodeQuantizationConfig does not have activation params.
-        """
-        return (not self.has_activation_quantization_params())
-
     def __eq__(self, other: Any) -> bool:
         """
         Compares the object to another object to find if they are equal.
@@ -203,7 +190,7 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):

         """
         if not isinstance(other, NodeActivationQuantizationConfig):
-            return False
+            return False  # pragma: no cover

         return self.activation_quantization_fn == other.activation_quantization_fn and \
                self.activation_quantization_params_fn == other.activation_quantization_params_fn and \
@@ -340,14 +327,6 @@ class WeightsAttrQuantizationConfig:
         else:
             self.set_weights_quantization_param({})

-    def has_weights_quantization_params(self) -> bool:
-        """
-
-        Returns: Whether NodeQuantizationConfig has weights quantization params or not.
-
-        """
-        return len(self.weights_quantization_params) > 0
-
     def __eq__(self, other: Any) -> bool:
         """
         Compares the object to another object to find if they are equal.
@@ -359,7 +338,7 @@ class WeightsAttrQuantizationConfig:

         """
         if not isinstance(other, WeightsAttrQuantizationConfig):
-            return False
+            return False  # pragma: no cover

         return self.weights_quantization_fn == other.weights_quantization_fn and \
                self.weights_quantization_params_fn == other.weights_quantization_params_fn and \
@@ -419,11 +398,11 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
             # In Tensorflow, the attribute name is composed of the framework attribute name and the layer name,
             # therefore, we need to look for the attribute in the op_cfg that is contained in the node attribute's name.
             attrs_included_in_name = {k: v for k, v in op_cfg.attr_weights_configs_mapping.items() if k in attr}
-            if len(attrs_included_in_name) > 1:
-                Logger.
-
-
-
+            if len(attrs_included_in_name) > 1:  # pragma: no cover
+                Logger.critical(f"Found multiple attribute in TPC OpConfig that are contained "
+                                f"in the attribute name '{attr}'."
+                                f"Please fix the TPC attribute names mapping such that each operator's attribute would "
+                                f"have a unique matching name.")
             if len(attrs_included_in_name) == 0:
                 attr_cfg = op_cfg.default_weight_attr_config
             else:
@@ -446,8 +425,8 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
         Returns: An attribute quantization configuration.

         """
-        if attr_name is None:
-            Logger.
+        if attr_name is None:  # pragma: no cover
+            Logger.critical("Got 'None' attribute name for retrieving weights attribute quantization configuration.")

         if isinstance(attr_name, int):
             # this is a positional attribute
@@ -463,8 +442,8 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
         # If no attribute with the exact name then an error would be thrown
         attr_cfg = self.attributes_config_mapping.get(attr_name)

-        if attr_cfg is None:
-            Logger.
+        if attr_cfg is None:  # pragma: no cover
+            Logger.critical(f"Weight attribute '{attr_name}' config could not be found.")

         return attr_cfg

@@ -519,8 +498,8 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
                         f"{list(attrs_with_name.keys())}.")
         return attrs_with_name

-    def set_quant_config_attr(self, config_parameter_name: str, config_parameter_value: Any,
-                              *args: List[Any], **kwargs: Dict[str, Any]):
+    def set_quant_config_attr(self, config_parameter_name: str, config_parameter_value: Any,
+                              attr_name: Union[str, int] = None, *args: List[Any], **kwargs: Dict[str, Any]):
         """
         This method overrides the parent class set_quant_config_attr to enable setting a specific weights
         attribute config parameter.
@@ -546,8 +525,8 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):
             else:
                 Logger.warning(f"Parameter {config_parameter_name} could not be found in the node quantization config of "
                                f"weights attribute {attr_name} and was not updated!")
-        else:
-            Logger.
+        else:  # pragma: no cover
+            Logger.critical(f"Weights attribute {attr_name} could not be found to set parameter {config_parameter_name}.")

     def __eq__(self, other: Any) -> bool:
         """
@@ -560,7 +539,7 @@ class NodeWeightsQuantizationConfig(BaseNodeQuantizationConfig):

         """
         if not isinstance(other, NodeWeightsQuantizationConfig):
-            return False
+            return False  # pragma: no cover

         return self.min_threshold == other.min_threshold and \
                self.simd_size == other.simd_size and \
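The widened set_quant_config_attr signature lets a caller target a single weights attribute directly. A minimal sketch, assuming an existing NodeWeightsQuantizationConfig instance and a kernel attribute named 'kernel' (both assumptions for illustration):

    # Hypothetical call: update only the kernel attribute's bit-width parameter.
    weights_qcfg.set_quant_config_attr('weights_n_bits', 4, attr_name='kernel')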
@@ -19,7 +19,7 @@ from sklearn.cluster import KMeans

 import model_compression_toolkit.core.common.quantization.quantization_config as qc
 from model_compression_toolkit.constants import LUT_VALUES, MIN_THRESHOLD, SCALE_PER_CHANNEL, \
-    LUT_VALUES_BITWIDTH, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES
+    LUT_VALUES_BITWIDTH, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES, SIGNED
 from model_compression_toolkit.core.common.hessian import HessianInfoService
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import \
     max_power_of_two, int_quantization_with_threshold
@@ -110,7 +110,8 @@ def lut_kmeans_histogram(bins: np.ndarray,
                          constrained: bool = True,
                          n_iter: int = 20,
                          min_threshold: float = MIN_THRESHOLD,
-                         quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE
+                         quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE,
+                         is_signed: bool = None) -> Dict:
     """
     Finds quantization cluster points for non-uniform activation quantization.
     The quantizer first finds the closest power-of-two number to the max value of the given histogram,
@@ -129,6 +130,7 @@ def lut_kmeans_histogram(bins: np.ndarray,
         n_iter: Number of iteration ot search for the threshold (not used for this method).
         min_threshold: Minimal threshold to use if threshold is too small.
         quant_error_method: an error function to optimize the parameters' selection accordingly (not used for this method).
+        is_signed: Whether the quantization is signed or not. If None then compute SIGNED value.

     Returns:
         A dictionary containing the cluster assignments according to the k-means algorithm and
@@ -148,9 +150,9 @@ def lut_kmeans_histogram(bins: np.ndarray,
     tensor_max = np.max(bins_with_values)
     threshold = max_power_of_two(tensor_max, min_threshold)

-    signed = np.any(bins[:-1][counts != 0] < 0)  # Whether histogram contains negative values or not.
+    signed = np.any(bins[:-1][counts != 0] < 0) if is_signed is None else is_signed  # Whether histogram contains negative values or not.
     tensor_for_kmeans = int_quantization_with_threshold(data=bins, threshold=threshold, n_bits=LUT_VALUES_BITWIDTH, signed=signed)
     kmeans.fit(tensor_for_kmeans.reshape(-1, 1), sample_weight=np.insert(counts, 0, 0))

     return {LUT_VALUES: np.float32(np.round(kmeans.cluster_centers_)),
-            THRESHOLD: threshold}
+            THRESHOLD: threshold, SIGNED: signed}
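The only behavioral change inside lut_kmeans_histogram is how signedness is resolved: an explicit is_signed wins, otherwise it is inferred from the histogram as before. A standalone sketch of that rule (the helper name and sample data are illustrative, not part of the package):

    import numpy as np

    def resolve_signedness(bins, counts, is_signed=None):
        # Honor an explicit flag; otherwise infer from negative histogram mass, as in the diff above.
        return bool(np.any(bins[:-1][counts != 0] < 0)) if is_signed is None else is_signed

    bins = np.linspace(0.0, 6.0, 65)       # made-up, non-negative histogram (e.g. post-ReLU output)
    counts = np.ones(64, dtype=int)
    print(resolve_signedness(bins, counts))          # False (inferred from the data)
    print(resolve_signedness(bins, counts, True))    # True  (forced by the caller)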
@@ -16,7 +16,7 @@ import numpy as np
 from typing import Union, Tuple, Dict

 import model_compression_toolkit.core.common.quantization.quantization_config as qc
-from model_compression_toolkit.constants import MIN_THRESHOLD, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES
+from model_compression_toolkit.constants import MIN_THRESHOLD, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES, SIGNED
 from model_compression_toolkit.core.common.hessian import HessianInfoService
 from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_search import \
     qparams_selection_tensor_search, qparams_selection_histogram_search
@@ -105,7 +105,8 @@ def power_of_two_selection_histogram(bins: np.ndarray,
                                      constrained: bool = True,
                                      n_iter: int = 20,
                                      min_threshold: float = MIN_THRESHOLD,
-                                     quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE
+                                     quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE,
+                                     is_signed: bool = None) -> Dict:
     """
     Compute the power of two threshold based on the provided QuantizationErrorMethod to quantize a histogram.
     Different search is applied, depends on the value of the selected QuantizationErrorMethod.
@@ -121,6 +122,7 @@ def power_of_two_selection_histogram(bins: np.ndarray,
         n_iter: Number of iteration ot search for the threshold (not used for this method).
         min_threshold: Minimal threshold to use if threshold is too small (used only for kl threshold selection).
         quant_error_method: an error function to optimize the parameters' selection accordingly.
+        is_signed: Whether the quantization is signed or not. If None then compute SIGNED value.

     Returns:
         Power of two threshold to quantize the histogram a power of 2 manner.
@@ -128,17 +130,20 @@ def power_of_two_selection_histogram(bins: np.ndarray,
     if quant_error_method == qc.QuantizationErrorMethod.NOCLIPPING:
         tensor_max = np.max(np.abs(bins)[1:][counts > 0])
         threshold = max_power_of_two(tensor_max, min_threshold)
+        # Resolve is_signed in case it is None.
+        signed = (bins<0).any() if is_signed is None else is_signed
     else:
         error_function = get_threshold_selection_histogram_error_function(QuantizationMethod.POWER_OF_TWO,
                                                                           quant_error_method, p)
-        threshold = qparams_selection_histogram_search(error_function,
-
-
-
-
-
-
-
+        threshold, signed = qparams_selection_histogram_search(error_function,
+                                                               bins,
+                                                               counts,
+                                                               n_bits,
+                                                               constrained=constrained,
+                                                               n_iter=n_iter,
+                                                               min_threshold=min_threshold,
+                                                               is_signed=is_signed)
+    return {THRESHOLD: threshold, SIGNED: signed}


 def power_of_two_no_clipping_selection_min_max(bins: np.ndarray,
@@ -151,7 +156,8 @@ def power_of_two_no_clipping_selection_min_max(bins: np.ndarray,
                                                n_iter: int = 20,
                                                min_threshold: float = MIN_THRESHOLD,
                                                quant_error_method: qc.QuantizationErrorMethod =
-                                               qc.QuantizationErrorMethod.NOCLIPPING
+                                               qc.QuantizationErrorMethod.NOCLIPPING,
+                                               is_signed: bool = None) -> Dict:
     """
     Gets a threshold between min and max numbers.
     If computed threshold is less than min_threshold, min_threshold is returned.
@@ -168,4 +174,5 @@ def power_of_two_no_clipping_selection_min_max(bins: np.ndarray,
                                                constrained,
                                                n_iter,
                                                min_threshold=min_threshold,
-                                               quant_error_method=qc.QuantizationErrorMethod.NOCLIPPING
+                                               quant_error_method=qc.QuantizationErrorMethod.NOCLIPPING,
+                                               is_signed=is_signed)
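For callers of the threshold-selection functions, the visible change is the returned dictionary, which now carries the resolved signedness alongside the threshold. A sketch of consuming it (the dictionary literal and its key strings are stand-ins; the real keys are the THRESHOLD and SIGNED constants imported above):

    # Stand-in for a power_of_two_selection_histogram result.
    n_bits = 8
    qparams = {'threshold': 4.0, 'is_signed': True}

    if qparams['is_signed']:
        q_min, q_max = -2 ** (n_bits - 1), 2 ** (n_bits - 1) - 1   # e.g. [-128, 127]
    else:
        q_min, q_max = 0, 2 ** n_bits - 1                          # e.g. [0, 255]
    # Power-of-two threshold quantization step size under these assumptions.
    scale = qparams['threshold'] / 2 ** (n_bits - int(qparams['is_signed']))
    print(q_min, q_max, scale)   # -128 127 0.03125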