mct-nightly 2.2.0.20241006.532__tar.gz → 2.2.0.20241008.450__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/__init__.py +3 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +60 -2
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +14 -3
- mct-nightly-2.2.0.20241008.450/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +201 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_config.py +7 -2
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_training.py +43 -12
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +36 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_training.py +58 -8
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +28 -7
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +1 -1
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +14 -12
- mct-nightly-2.2.0.20241006.532/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -152
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/README.md +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/SOURCES.txt +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/util.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/setup.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/test_annealing_cfg.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/test_gradual_act_quantization.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py +0 -0
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
|
|
27
27
|
from model_compression_toolkit import pruning
|
28
28
|
from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
|
29
29
|
|
30
|
-
__version__ = "2.2.0.
|
30
|
+
__version__ = "2.2.0.20241008.000450"
|
@@ -12,6 +12,8 @@
|
|
12
12
|
# See the License for the specific language governing permissions and
|
13
13
|
# limitations under the License.
|
14
14
|
# ==============================================================================
|
15
|
-
from model_compression_toolkit.core.common.hessian.hessian_scores_request import
|
15
|
+
from model_compression_toolkit.core.common.hessian.hessian_scores_request import (
|
16
|
+
HessianScoresRequest, HessianMode, HessianScoresGranularity, HessianEstimationDistribution
|
17
|
+
)
|
16
18
|
from model_compression_toolkit.core.common.hessian.hessian_info_service import HessianInfoService
|
17
19
|
import model_compression_toolkit.core.common.hessian.hessian_info_utils as hessian_utils
|
@@ -12,16 +12,19 @@
|
|
12
12
|
# See the License for the specific language governing permissions and
|
13
13
|
# limitations under the License.
|
14
14
|
# ==============================================================================
|
15
|
+
import hashlib
|
15
16
|
|
16
17
|
import numpy as np
|
17
18
|
from functools import partial
|
18
19
|
from tqdm import tqdm
|
19
|
-
from typing import Callable, List, Dict, Any, Tuple
|
20
|
+
from typing import Callable, List, Dict, Any, Tuple, TYPE_CHECKING
|
20
21
|
|
21
22
|
from model_compression_toolkit.constants import HESSIAN_NUM_ITERATIONS
|
22
23
|
from model_compression_toolkit.core.common.hessian.hessian_scores_request import HessianScoresRequest, \
|
23
24
|
HessianScoresGranularity, HessianMode
|
24
25
|
from model_compression_toolkit.logger import Logger
|
26
|
+
if TYPE_CHECKING: # pragma: no cover
|
27
|
+
from model_compression_toolkit.core.common import BaseNode
|
25
28
|
|
26
29
|
|
27
30
|
class HessianInfoService:
|
@@ -228,6 +231,61 @@ class HessianInfoService:
|
|
228
231
|
return next_iter_remain_samples if next_iter_remain_samples is not None and len(next_iter_remain_samples) > 0 \
|
229
232
|
and len(next_iter_remain_samples[0]) > 0 else None
|
230
233
|
|
234
|
+
def compute_trackable_per_sample_hessian(self,
|
235
|
+
hessian_scores_request: HessianScoresRequest,
|
236
|
+
inputs_batch: List[np.ndarray]) -> Dict[str, Dict['BaseNode', np.ndarray]]:
|
237
|
+
"""
|
238
|
+
Compute hessian score per image hash. We compute the score directly for images rather than via data generator,
|
239
|
+
as data generator might yield different images each time, depending on how it was defined,
|
240
|
+
|
241
|
+
Args:
|
242
|
+
hessian_scores_request: hessian scores request
|
243
|
+
inputs_batch: a list containing a batch of inputs.
|
244
|
+
|
245
|
+
Returns:
|
246
|
+
A dict of Hessian scores per image hash per layer {image hash: {layer: score}}
|
247
|
+
"""
|
248
|
+
topo_sorted_nodes_names = [x.name for x in self.graph.get_topo_sorted_nodes()]
|
249
|
+
hessian_scores_request.target_nodes.sort(key=lambda x: topo_sorted_nodes_names.index(x.name))
|
250
|
+
|
251
|
+
hessian_score_by_image_hash = {}
|
252
|
+
|
253
|
+
if not inputs_batch or not isinstance(inputs_batch, list):
|
254
|
+
raise TypeError('Expected a non-empty list of inputs') # pragma: no cover
|
255
|
+
if len(inputs_batch) > 1:
|
256
|
+
raise NotImplementedError('Per-sample hessian computation is not supported for networks with multiple inputs') # pragma: no cover
|
257
|
+
|
258
|
+
# Get the framework-specific calculator Hessian-approximation scores
|
259
|
+
fw_hessian_calculator = self.fw_impl.get_hessian_scores_calculator(graph=self.graph,
|
260
|
+
input_images=inputs_batch,
|
261
|
+
hessian_scores_request=hessian_scores_request,
|
262
|
+
num_iterations_for_approximation=self.num_iterations_for_approximation)
|
263
|
+
hessian_scores = fw_hessian_calculator.compute()
|
264
|
+
for i in range(inputs_batch[0].shape[0]):
|
265
|
+
img_hash = self.calc_image_hash(inputs_batch[0][i])
|
266
|
+
hessian_score_by_image_hash[img_hash] = {
|
267
|
+
node: score[i] for node, score in zip(hessian_scores_request.target_nodes, hessian_scores)
|
268
|
+
}
|
269
|
+
|
270
|
+
return hessian_score_by_image_hash
|
271
|
+
|
272
|
+
@staticmethod
|
273
|
+
def calc_image_hash(image):
|
274
|
+
"""
|
275
|
+
Calculates hash for an input image.
|
276
|
+
|
277
|
+
Args:
|
278
|
+
image: input 3d image (without batch).
|
279
|
+
|
280
|
+
Returns:
|
281
|
+
Image hash.
|
282
|
+
|
283
|
+
"""
|
284
|
+
if not len(image.shape) == 3: # pragma: no cover
|
285
|
+
raise ValueError(f'Expected 3d image (without batch) for image hash calculation, got {len(image.shape)}')
|
286
|
+
image_bytes = image.astype(np.float32).tobytes()
|
287
|
+
return hashlib.md5(image_bytes).hexdigest()
|
288
|
+
|
231
289
|
def fetch_hessian(self,
|
232
290
|
hessian_scores_request: HessianScoresRequest,
|
233
291
|
required_size: int,
|
@@ -248,7 +306,7 @@ class HessianInfoService:
|
|
248
306
|
OC for per-output-channel when the requested node has OC output-channels, etc.)
|
249
307
|
"""
|
250
308
|
|
251
|
-
if len(hessian_scores_request.target_nodes) == 0:
|
309
|
+
if len(hessian_scores_request.target_nodes) == 0: # pragma: no cover
|
252
310
|
return []
|
253
311
|
|
254
312
|
if required_size == 0:
|
@@ -40,6 +40,14 @@ class HessianScoresGranularity(Enum):
|
|
40
40
|
PER_TENSOR = 2
|
41
41
|
|
42
42
|
|
43
|
+
class HessianEstimationDistribution(str, Enum):
|
44
|
+
"""
|
45
|
+
Distribution for Hutchinson estimator random vector
|
46
|
+
"""
|
47
|
+
GAUSSIAN = 'gaussian'
|
48
|
+
RADEMACHER = 'rademacher'
|
49
|
+
|
50
|
+
|
43
51
|
class HessianScoresRequest:
|
44
52
|
"""
|
45
53
|
Request configuration for the Hessian-approximation scores.
|
@@ -53,7 +61,8 @@ class HessianScoresRequest:
|
|
53
61
|
def __init__(self,
|
54
62
|
mode: HessianMode,
|
55
63
|
granularity: HessianScoresGranularity,
|
56
|
-
target_nodes: List
|
64
|
+
target_nodes: List,
|
65
|
+
distribution: HessianEstimationDistribution = HessianEstimationDistribution.GAUSSIAN):
|
57
66
|
"""
|
58
67
|
Attributes:
|
59
68
|
mode (HessianMode): Mode of Hessian-approximation score (w.r.t weights or activations).
|
@@ -64,6 +73,7 @@ class HessianScoresRequest:
|
|
64
73
|
self.mode = mode # w.r.t activations or weights
|
65
74
|
self.granularity = granularity # per element, per layer, per channel
|
66
75
|
self.target_nodes = target_nodes
|
76
|
+
self.distribution = distribution
|
67
77
|
|
68
78
|
def __eq__(self, other):
|
69
79
|
# Checks if the other object is an instance of HessianScoresRequest
|
@@ -71,9 +81,10 @@ class HessianScoresRequest:
|
|
71
81
|
return isinstance(other, HessianScoresRequest) and \
|
72
82
|
self.mode == other.mode and \
|
73
83
|
self.granularity == other.granularity and \
|
74
|
-
self.target_nodes == other.target_nodes
|
84
|
+
self.target_nodes == other.target_nodes and \
|
85
|
+
self.distribution == other.distribution
|
75
86
|
|
76
87
|
def __hash__(self):
|
77
88
|
# Computes the hash based on the attributes.
|
78
89
|
# The use of a tuple here ensures that the hash is influenced by all the attributes.
|
79
|
-
return hash((self.mode, self.granularity, tuple(self.target_nodes)))
|
90
|
+
return hash((self.mode, self.granularity, tuple(self.target_nodes), self.distribution))
|
@@ -0,0 +1,201 @@
|
|
1
|
+
# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
|
16
|
+
from typing import List
|
17
|
+
|
18
|
+
from torch import autograd
|
19
|
+
from tqdm import tqdm
|
20
|
+
import numpy as np
|
21
|
+
|
22
|
+
from model_compression_toolkit.constants import MIN_HESSIAN_ITER, HESSIAN_COMP_TOLERANCE, HESSIAN_NUM_ITERATIONS
|
23
|
+
from model_compression_toolkit.core.common import Graph
|
24
|
+
from model_compression_toolkit.core.common.hessian import (HessianScoresRequest, HessianScoresGranularity,
|
25
|
+
HessianEstimationDistribution)
|
26
|
+
from model_compression_toolkit.core.pytorch.back2framework.float_model_builder import FloatPyTorchModelBuilder
|
27
|
+
from model_compression_toolkit.core.pytorch.hessian.hessian_scores_calculator_pytorch import \
|
28
|
+
HessianScoresCalculatorPytorch
|
29
|
+
from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
|
30
|
+
from model_compression_toolkit.logger import Logger
|
31
|
+
import torch
|
32
|
+
|
33
|
+
|
34
|
+
class ActivationHessianScoresCalculatorPytorch(HessianScoresCalculatorPytorch):
|
35
|
+
"""
|
36
|
+
Pytorch implementation of the Hessian approximation scores Calculator for activations.
|
37
|
+
"""
|
38
|
+
def __init__(self,
|
39
|
+
graph: Graph,
|
40
|
+
input_images: List[torch.Tensor],
|
41
|
+
fw_impl,
|
42
|
+
hessian_scores_request: HessianScoresRequest,
|
43
|
+
num_iterations_for_approximation: int = HESSIAN_NUM_ITERATIONS):
|
44
|
+
"""
|
45
|
+
Args:
|
46
|
+
graph: Computational graph for the float model.
|
47
|
+
input_images: List of input images for the computation.
|
48
|
+
fw_impl: Framework-specific implementation for Hessian approximation scores computation.
|
49
|
+
hessian_scores_request: Configuration request for which to compute the Hessian approximation scores.
|
50
|
+
num_iterations_for_approximation: Number of iterations to use when approximating the Hessian scores.
|
51
|
+
|
52
|
+
"""
|
53
|
+
super(ActivationHessianScoresCalculatorPytorch, self).__init__(graph=graph,
|
54
|
+
input_images=input_images,
|
55
|
+
fw_impl=fw_impl,
|
56
|
+
hessian_scores_request=hessian_scores_request,
|
57
|
+
num_iterations_for_approximation=num_iterations_for_approximation)
|
58
|
+
|
59
|
+
def forward_pass(self):
|
60
|
+
model_output_nodes = [ot.node for ot in self.graph.get_outputs()]
|
61
|
+
|
62
|
+
if len([n for n in self.hessian_request.target_nodes if n in model_output_nodes]) > 0:
|
63
|
+
Logger.critical("Activation Hessian approximation cannot be computed for model outputs. "
|
64
|
+
"Exclude output nodes from Hessian request targets.")
|
65
|
+
|
66
|
+
grad_model_outputs = self.hessian_request.target_nodes + model_output_nodes
|
67
|
+
model, _ = FloatPyTorchModelBuilder(graph=self.graph, append2output=grad_model_outputs).build_model()
|
68
|
+
model.eval()
|
69
|
+
|
70
|
+
# Run model inference
|
71
|
+
# Set inputs to track gradients during inference
|
72
|
+
for input_tensor in self.input_images:
|
73
|
+
input_tensor.requires_grad_()
|
74
|
+
input_tensor.retain_grad()
|
75
|
+
|
76
|
+
outputs = model(*self.input_images)
|
77
|
+
|
78
|
+
if len(outputs) != len(grad_model_outputs): # pragma: no cover
|
79
|
+
Logger.critical(f"Mismatch in expected and actual model outputs for activation Hessian approximation. "
|
80
|
+
f"Expected {len(grad_model_outputs)} outputs, received {len(outputs)}.")
|
81
|
+
|
82
|
+
# Extracting the intermediate activation tensors and the model real output.
|
83
|
+
# Note that we do not allow computing Hessian for output nodes, so there shouldn't be an overlap.
|
84
|
+
num_target_nodes = len(self.hessian_request.target_nodes)
|
85
|
+
# Extract activation tensors of nodes for which we want to compute Hessian
|
86
|
+
target_activation_tensors = outputs[:num_target_nodes]
|
87
|
+
# Extract the model outputs
|
88
|
+
output_tensors = outputs[num_target_nodes:]
|
89
|
+
device = output_tensors[0].device
|
90
|
+
|
91
|
+
# Concat outputs
|
92
|
+
# First, we need to unfold all outputs that are given as list, to extract the actual output tensors
|
93
|
+
output = self.concat_tensors(output_tensors)
|
94
|
+
return output, target_activation_tensors
|
95
|
+
|
96
|
+
def _generate_random_vectors_batch(self, shape: tuple, distribution: HessianEstimationDistribution,
                                   device: torch.device) -> torch.Tensor:
    """
    Draw one batch of random probe vectors for the Hutchinson Hessian estimator.

    Args:
        shape: target shape
        distribution: distribution to sample from
        device: target device

    Returns:
        Random tensor
    """
    if distribution == HessianEstimationDistribution.RADEMACHER:
        # Sample uniformly from {0, 1}, then map 0 -> -1 to obtain {-1, +1} entries.
        probes = torch.randint(high=2, size=shape, device=device)
        probes[probes == 0] = -1
        return probes

    if distribution == HessianEstimationDistribution.GAUSSIAN:
        return torch.randn(shape, device=device)

    raise ValueError(f'Unknown distribution {distribution}')  # pragma: no cover
|
118
|
+
|
119
|
+
def compute(self) -> List[np.ndarray]:
    """
    Compute the scores that are based on the approximation of the Hessian w.r.t the requested target nodes' activations.

    Returns:
        List[np.ndarray]: Scores based on the approximated Hessian for the requested nodes.
    """
    output, target_activation_tensors = self.forward_pass()

    # Dispatch on the requested granularity level.
    granularity = self.hessian_request.granularity
    if granularity == HessianScoresGranularity.PER_TENSOR:
        scores = self._compute_per_tensor(output, target_activation_tensors)
    elif granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL:
        scores = self._compute_per_channel(output, target_activation_tensors)
    else:
        raise NotImplementedError(f'{self.hessian_request.granularity} is not supported')  # pragma: no cover

    # Convert results to list of numpy arrays
    return [torch_tensor_to_numpy(score) for score in scores]
|
138
|
+
|
139
|
+
def _compute_per_tensor(self, output, target_activation_tensors):
    """
    Approximate the Hessian score per interest-point tensor (one value per image in the batch)
    using Hutchinson-style random probing, with early stopping once the running means converge.

    Args:
        output: concatenated model output tensor (as returned by forward_pass).
        target_activation_tensors: intermediate activation tensors of the target nodes.

    Returns:
        A list (one entry per target node) of tensors of shape (batch, 1) with the approximated scores.
    """
    assert self.hessian_request.granularity == HessianScoresGranularity.PER_TENSOR
    # Running mean per target node; initialized to 0 (shape (1,) broadcasts against the batch dim).
    ipts_hessian_approx_scores = [torch.tensor([0.0], requires_grad=True, device=output.device)
                                  for _ in range(len(target_activation_tensors))]
    prev_mean_results = None
    for j in tqdm(range(self.num_iterations_for_approximation), "Hessian random iterations"):  # Approximation iterations
        # Getting a random vector with normal distribution
        v = self._generate_random_vectors_batch(output.shape, self.hessian_request.distribution, output.device)
        f_v = torch.sum(v * output)
        for i, ipt_tensor in enumerate(target_activation_tensors):  # Per Interest point activation tensor
            # Computing the hessian-approximation scores by getting the gradient of (output * v)
            hess_v = autograd.grad(outputs=f_v,
                                   inputs=ipt_tensor,
                                   retain_graph=True,
                                   allow_unused=True)[0]

            if hess_v is None:
                # In case we have an output node, which is an interest point, but it is not differentiable,
                # we consider its Hessian to be the initial value 0.
                continue  # pragma: no cover

            # Sum of squared gradients over all dims but the batch (CxHxW for conv),
            # yielding one score per image.
            hessian_approx_scores = torch.sum(hess_v ** 2.0, dim=tuple(d for d in range(1, len(hess_v.shape))))

            # Update node Hessian approximation mean over random iterations
            ipts_hessian_approx_scores[i] = (j * ipts_hessian_approx_scores[i] + hessian_approx_scores) / (j + 1)

        # If the change to the maximal mean Hessian approximation is insignificant we stop the calculation
        if j > MIN_HESSIAN_ITER:
            if prev_mean_results is not None:
                new_mean_res = torch.mean(torch.stack(ipts_hessian_approx_scores), dim=1)
                relative_delta_per_node = (torch.abs(new_mean_res - prev_mean_results) /
                                           (torch.abs(new_mean_res) + 1e-6))
                max_delta = torch.max(relative_delta_per_node)
                if max_delta < HESSIAN_COMP_TOLERANCE:
                    break
        prev_mean_results = torch.mean(torch.stack(ipts_hessian_approx_scores), dim=1)

    # add extra dimension to preserve previous behaviour
    ipts_hessian_approx_scores = [torch.unsqueeze(t, -1) for t in ipts_hessian_approx_scores]
    return ipts_hessian_approx_scores
|
180
|
+
|
181
|
+
def _compute_per_channel(self, output, target_activation_tensors):
    """
    Approximate the Hessian score per output channel of each interest-point tensor
    using Hutchinson-style random probing.

    Unlike _compute_per_tensor, this variant runs all iterations with no early-stopping
    convergence check.

    Args:
        output: concatenated model output tensor (as returned by forward_pass).
        target_activation_tensors: intermediate activation tensors of the target nodes.

    Returns:
        A list (one entry per target node) of tensors with per-(batch, channel) scores.
    """
    assert self.hessian_request.granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL
    # Running mean per target node; scalar 0 broadcasts to the per-channel score shape
    # on the first update.
    ipts_hessian_approx_scores = [torch.tensor(0.0, requires_grad=True, device=output.device)
                                  for _ in range(len(target_activation_tensors))]

    for j in tqdm(range(self.num_iterations_for_approximation), "Hessian random iterations"):  # Approximation iterations
        v = self._generate_random_vectors_batch(output.shape, self.hessian_request.distribution, output.device)
        f_v = torch.sum(v * output)
        for i, ipt_tensor in enumerate(target_activation_tensors):  # Per Interest point activation tensor
            # NOTE(review): no allow_unused here (unlike _compute_per_tensor) — a
            # non-differentiable interest point would raise; confirm this is intended.
            hess_v = autograd.grad(outputs=f_v,
                                   inputs=ipt_tensor,
                                   retain_graph=True)[0]
            hessian_approx_scores = hess_v ** 2
            rank = len(hess_v.shape)
            # Average any spatial dims (e.g. H, W for conv), keeping (batch, channel).
            if rank > 2:
                hessian_approx_scores = torch.mean(hessian_approx_scores, dim=tuple(range(2, rank)))

            # Update node Hessian approximation mean over random iterations
            ipts_hessian_approx_scores[i] = (j * ipts_hessian_approx_scores[i] + hessian_approx_scores) / (j + 1)

    return ipts_hessian_approx_scores
|
@@ -17,6 +17,7 @@ from enum import Enum
|
|
17
17
|
from typing import Callable, Any, Dict, Optional
|
18
18
|
|
19
19
|
from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES, ACT_HESSIAN_DEFAULT_BATCH_SIZE
|
20
|
+
from model_compression_toolkit.core.common.hessian import HessianScoresGranularity, HessianEstimationDistribution
|
20
21
|
from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
|
21
22
|
|
22
23
|
|
@@ -39,17 +40,21 @@ class GPTQHessianScoresConfig:
|
|
39
40
|
Configuration to use for computing the Hessian-based scores for GPTQ loss metric.
|
40
41
|
|
41
42
|
Args:
|
42
|
-
hessians_num_samples (int): Number of samples to use for computing the Hessian-based scores.
|
43
|
+
hessians_num_samples (int|None): Number of samples to use for computing the Hessian-based scores.
|
44
|
+
If None, compute Hessian for all images.
|
43
45
|
norm_scores (bool): Whether to normalize the returned scores of the weighted loss function (to get values between 0 and 1).
|
44
46
|
log_norm (bool): Whether to use log normalization for the GPTQ Hessian-based scores.
|
45
47
|
scale_log_norm (bool): Whether to scale the final vector of the Hessian-based scores.
|
46
48
|
hessian_batch_size (int): The Hessian computation batch size. used only if using GPTQ with Hessian-based objective.
|
49
|
+
per_sample (bool): Whether to use per sample attention score.
|
47
50
|
"""
|
48
|
-
hessians_num_samples: int = GPTQ_HESSIAN_NUM_SAMPLES
|
51
|
+
hessians_num_samples: Optional[int] = GPTQ_HESSIAN_NUM_SAMPLES
|
49
52
|
norm_scores: bool = True
|
50
53
|
log_norm: bool = True
|
51
54
|
scale_log_norm: bool = False
|
52
55
|
hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE
|
56
|
+
per_sample: bool = False
|
57
|
+
estimator_distribution: HessianEstimationDistribution = HessianEstimationDistribution.GAUSSIAN
|
53
58
|
|
54
59
|
|
55
60
|
@dataclass
|
@@ -13,6 +13,7 @@
|
|
13
13
|
# limitations under the License.
|
14
14
|
# ==============================================================================
|
15
15
|
import copy
|
16
|
+
import hashlib
|
16
17
|
from abc import ABC, abstractmethod
|
17
18
|
import numpy as np
|
18
19
|
from typing import Callable, List, Any, Dict
|
@@ -143,7 +144,11 @@ class GPTQTrainer(ABC):
|
|
143
144
|
return np.asarray([1 / num_nodes for _ in range(num_nodes)])
|
144
145
|
|
145
146
|
# Fetch hessian approximations for each target node
|
146
|
-
|
147
|
+
# TODO this smells like a potential bug. In hessian calculation target nodes are topo sorted and results are returned
|
148
|
+
# TODO also target nodes are replaced for reuse. Does this work correctly?
|
149
|
+
approximations = self._fetch_hessian_approximations(HessianScoresGranularity.PER_TENSOR)
|
150
|
+
compare_point_to_hessian_approx_scores = {node: score for node, score in zip(self.compare_points, approximations)}
|
151
|
+
|
147
152
|
# Process the fetched hessian approximations to gather them per images
|
148
153
|
hessian_approx_score_by_image = (
|
149
154
|
self._process_hessian_approximations(compare_point_to_hessian_approx_scores))
|
@@ -172,29 +177,55 @@ class GPTQTrainer(ABC):
|
|
172
177
|
# If log normalization is not enabled, return the mean of the approximations across images
|
173
178
|
return np.mean(hessian_approx_score_by_image, axis=0)
|
174
179
|
|
175
|
-
def
|
180
|
+
def _compute_sample_layer_attention_scores(self, inputs_batch) -> Dict[str, Dict[BaseNode, np.ndarray]]:
    """
    Compute sample layer attention scores per image hash per layer.

    Args:
        inputs_batch: a list containing a batch of inputs.

    Returns:
        A dictionary with a structure {img_hash: {layer: score}}.
    """
    request = self._build_hessian_request(HessianScoresGranularity.PER_OUTPUT_CHANNEL)
    hessian_batch_size = self.gptq_config.hessian_weights_config.hessian_batch_size

    hessian_score_per_image_per_layer = {}
    # If hessian batch is smaller than inputs batch, split it to hessian batches. If hessian batch is larger,
    # it's currently ignored (TODO)
    for i in range(0, inputs_batch[0].shape[0], hessian_batch_size):
        inputs = [t[i: i+hessian_batch_size] for t in inputs_batch]
        hessian_score_per_image_per_layer.update(
            self.hessian_service.compute_trackable_per_sample_hessian(request, inputs)
        )
    # Reduce the per-channel scores to a single score per layer by taking the max
    # over axis 0 (presumably the channel axis — TODO confirm against the service output).
    for img_hash, v in hessian_score_per_image_per_layer.items():
        hessian_score_per_image_per_layer[img_hash] = {k: t.max(axis=0) for k, t in v.items()}
    return hessian_score_per_image_per_layer
|
205
|
+
|
206
|
+
def _fetch_hessian_approximations(self, granularity: HessianScoresGranularity) -> Dict[BaseNode, List[List[float]]]:
    """
    Fetches hessian approximations for each target node.

    Args:
        granularity: granularity level at which the Hessian scores are computed
            (e.g. per tensor or per output channel).

    Returns:
        Mapping of target nodes to their hessian approximations.
    """
    hessian_scores_request = self._build_hessian_request(granularity)

    # Sample count and batch size come from the GPTQ Hessian-weights configuration;
    # hessians_num_samples may be None, meaning all available images are used.
    node_approximations = self.hessian_service.fetch_hessian(
        hessian_scores_request=hessian_scores_request,
        required_size=self.gptq_config.hessian_weights_config.hessians_num_samples,
        batch_size=self.gptq_config.hessian_weights_config.hessian_batch_size
    )
    return node_approximations
|
193
221
|
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
222
|
+
def _build_hessian_request(self, granularity: HessianScoresGranularity) -> HessianScoresRequest:
    """
    Build an activation-mode Hessian scores request for the comparison points.

    Args:
        granularity: granularity level at which the Hessian scores are computed.

    Returns:
        A HessianScoresRequest targeting self.compare_points, using the estimator
        distribution from the GPTQ Hessian-weights configuration.
    """
    return HessianScoresRequest(
        mode=HessianMode.ACTIVATION,
        granularity=granularity,
        target_nodes=self.compare_points,
        distribution=self.gptq_config.hessian_weights_config.estimator_distribution
    )
|
198
229
|
|
199
230
|
def _process_hessian_approximations(self, approximations: Dict[BaseNode, List[List[float]]]) -> List:
|
200
231
|
"""
|
@@ -13,8 +13,10 @@
|
|
13
13
|
# limitations under the License.
|
14
14
|
# ==============================================================================
|
15
15
|
from typing import List
|
16
|
+
|
16
17
|
import torch
|
17
18
|
|
19
|
+
|
18
20
|
def mse_loss(y: torch.Tensor, x: torch.Tensor, normalized: bool = True) -> torch.Tensor:
    """
    Compute the MSE of two tensors.

    Args:
        y: predicted tensor.
        x: reference tensor.
        normalized: whether to normalize the MSE by the mean squared value of x.

    Returns:
        The MSE of two tensors.
    """
    squared_error = torch.nn.MSELoss()(x, y)
    if not normalized:
        return squared_error
    # Normalize by the reference tensor's mean squared magnitude.
    return squared_error / torch.mean(torch.square(x))
|
30
32
|
|
31
33
|
|
@@ -62,3 +64,36 @@ def multiple_tensors_mse_loss(y_list: List[torch.Tensor],
|
|
62
64
|
else:
|
63
65
|
return torch.mean(torch.stack(loss_values_list))
|
64
66
|
|
67
|
+
|
68
|
+
def sample_layer_attention_loss(y_list: List[torch.Tensor],
                                x_list: List[torch.Tensor],
                                fxp_w_list,
                                flp_w_list,
                                act_bn_mean,
                                act_bn_std,
                                loss_weights: torch.Tensor) -> torch.Tensor:
    """
    Compute Sample Layer Attention loss between two lists of tensors.

    Args:
        y_list: First list of tensors.
        x_list: Second list of tensors.
        fxp_w_list, flp_w_list, act_bn_mean, act_bn_std: unused (needed to comply with the interface).
        loss_weights: layer-sample weights tensor of shape (layers, batch)

    Returns:
        Sample Layer Attention loss (a scalar).
    """
    loss = 0
    layers_mean_w = []

    # Idiom fix: the original used enumerate() but never read the index.
    for y, x, w in zip(y_list, x_list, loss_weights):
        # Squared error summed over the channel axis; keeps the batch axis first.
        norm = (y - x).pow(2).sum(1)
        if len(norm.shape) > 1:
            # Spatial dims remain (e.g. conv activations) - average them per sample.
            norm = norm.flatten(1).mean(1)
        # Weighted per-sample error, averaged over the batch.
        loss += torch.mean(w * norm)
        layers_mean_w.append(w.mean())

    # Normalize by the largest per-layer mean weight so the loss scale is bounded.
    loss = loss / torch.stack(layers_mean_w).max()
    return loss
|
99
|
+
|