mct-nightly 2.2.0.20241204.524.tar.gz → 2.2.0.20241206.524.tar.gz
This diff compares the contents of two publicly available versions of the package as they were released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
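A minimal sketch of how a comparison like this could be reproduced locally. The package name and version strings come from the header above; everything else (the use of `pip download`, the directory layout, and the final recursive `diff`) is an assumption for illustration, not a description of how this particular diff was generated.

```python
import glob
import subprocess
import tarfile

versions = ["2.2.0.20241204.524", "2.2.0.20241206.524"]

for v in versions:
    # Fetch the source distribution (.tar.gz) for this version from the registry.
    subprocess.run(
        ["pip", "download", f"mct-nightly=={v}",
         "--no-deps", "--no-binary", ":all:", "-d", "sdists"],
        check=True,
    )
    # Unpack it; the archive name may be normalized, so match it by version.
    sdist = glob.glob(f"sdists/*{v}.tar.gz")[0]
    with tarfile.open(sdist) as tf:
        tf.extractall("extracted")

# Compare the two extracted source trees (assumes a Unix-like `diff` is available).
old, new = (glob.glob(f"extracted/*{v}")[0] for v in versions)
subprocess.run(["diff", "-ru", old, new])
```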
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/__init__.py +1 -1
- mct-nightly-2.2.0.20241206.524/model_compression_toolkit/core/keras/data_util.py +200 -0
- mct-nightly-2.2.0.20241206.524/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +162 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +17 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +1 -2
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gptq_training.py +58 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/gptq_loss.py +35 -2
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/gptq_training.py +137 -67
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/graph_info.py +1 -4
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantization_facade.py +24 -11
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +23 -11
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/gptq_training.py +4 -45
- mct-nightly-2.2.0.20241204.524/model_compression_toolkit/core/keras/data_util.py +0 -67
- mct-nightly-2.2.0.20241204.524/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -155
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/README.md +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/mct_nightly.egg-info/SOURCES.txt +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/apply_activation_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/statistics_correction/keras_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_linear.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/statistics_correction/pytorch_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/gradual_activation_quantization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/common/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/schema/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/schema/v1.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/common/util.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20241204.524 → mct-nightly-2.2.0.20241206.524}/setup.py +0 -0
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

-__version__ = "2.2.0.
+__version__ = "2.2.0.20241206.000524"
@@ -0,0 +1,200 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from typing import Generator, Callable
+
+import tensorflow as tf
+
+from model_compression_toolkit.core.keras.tf_tensor_numpy import to_tf_tensor
+
+import tensorflow as tf
+from typing import Callable, Generator, Sequence, Any
+
+
+def get_tensor_spec(item, ignore_batch_dim=False):
+    """
+    Get the TensorFlow TensorSpec for an item, optionally ignoring the first dimension.
+
+    Args:
+        item: The input item, which could be a tensor, tuple, or list.
+        ignore_batch_dim (bool): Whether to ignore the first dimension of the tensor shape.
+
+    Returns:
+        TensorSpec or a tuple of TensorSpecs.
+    """
+    if isinstance(item, (tuple, list)):
+        return tuple(get_tensor_spec(sub_item, ignore_batch_dim) for sub_item in item)
+
+    shape = item.shape[1:] if ignore_batch_dim else item.shape
+    return tf.TensorSpec(shape=shape, dtype=item.dtype)
+
+
+def flat_gen_fn(data_gen_fn: Callable[[], Generator]):
+    """
+    Convert data generator with arbitrary batch size to a flat (sample by sample) data generator.
+
+    Args:
+        data_gen_fn: input data generator factory. Generator is expected to yield lists of tensors.
+
+    Returns:
+        A factory for a flattened data generator.
+    """
+
+    def gen():
+        for inputs_batch in data_gen_fn():
+            for sample in zip(*inputs_batch):
+                yield tuple([tf.convert_to_tensor(s) for s in sample])
+
+    return gen
+
+class TFDatasetFromGenerator:
+    """
+    TensorFlow dataset from a data generator function, batched to a specified size.
+    """
+
+    def __init__(self, data_gen_fn: Callable[[], Generator]):
+        """
+        Args:
+            data_gen_fn: a factory function for data generator that yields lists of tensors.
+        """
+        inputs = next(data_gen_fn())
+        if not isinstance(inputs, list):
+            raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}')
+        self.orig_batch_size = inputs[0].shape[0]
+        self._size = None
+
+        # TFDatasetFromGenerator flattens the dataset, thus we ignore the batch dimension
+        output_signature = get_tensor_spec(inputs, ignore_batch_dim=True)
+        self.dataset = tf.data.Dataset.from_generator(flat_gen_fn(data_gen_fn), output_signature=output_signature)
+
+
+    def __iter__(self):
+        return iter(self.dataset)
+
+    def __len__(self):
+        """ Returns the number of batches. """
+        if self._size is None:
+            self._size = sum(1 for _ in self.dataset)
+        return self._size
+
+
+
+class FixedTFDataset:
+    """
+    Fixed dataset containing samples from a generator, stored in memory.
+    """
+
+    def __init__(self, data_gen_fn: Callable[[], Generator], n_samples: int = None):
+        """
+        Args:
+            data_gen_fn: data generator function.
+            n_samples: number of samples to store in the dataset. If None, uses all samples in one pass.
+        """
+        inputs = next(data_gen_fn())
+        if not isinstance(inputs, list):
+            raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}')
+        self.orig_batch_size = inputs[0].shape[0]
+
+        samples = []
+        for batch in data_gen_fn():
+            samples.extend(zip(*[tf.convert_to_tensor(t) for t in batch]))
+            if n_samples is not None and len(samples) >= n_samples:
+                samples = samples[:n_samples]
+                break
+
+        if n_samples and len(samples) < n_samples:
+            raise ValueError(f'Not enough samples to create a dataset with {n_samples} samples')
+        self.samples = samples
+
+    def __len__(self):
+        return len(self.samples)
+
+    def __getitem__(self, index):
+        return self.samples[index]
+
+
+class FixedSampleInfoDataset:
+    """
+    Dataset for samples with additional info, each element is a tuple of (sample, sample_info).
+    """
+
+    def __init__(self, samples: Sequence, sample_info: Sequence):
+        if not all(len(info) == len(samples) for info in sample_info):
+            raise ValueError('Sample and additional info lengths must match')
+        self.samples = samples
+        self.sample_info = sample_info
+
+    def __len__(self):
+        return len(self.samples)
+
+    def __getitem__(self, index):
+        return self.samples[index], tuple([info[index] for info in self.sample_info])
+
+
+class IterableSampleWithConstInfoDataset:
+    """
+    Augments each sample in an iterable dataset with constant additional information.
+    """
+
+    def __init__(self, samples_dataset: tf.data.Dataset, *info: Any):
+        self.samples_dataset = samples_dataset
+        self.info = info
+
+    def __iter__(self):
+        for sample in self.samples_dataset:
+            yield (sample, *self.info)
+
+
+def data_gen_to_dataloader(data_gen_fn: Callable[[], Generator], batch_size: int):
+    """Create a DataLoader based on samples yielded by data_gen."""
+    ds = TFDatasetFromGenerator(data_gen_fn)
+    return create_tf_dataloader(dataset=ds, batch_size=batch_size)
+
+
+def create_tf_dataloader(dataset, batch_size, shuffle=False, collate_fn=None):
+    """
+    Creates a tf.data.Dataset with specified loading options.
+
+    Args:
+        dataset: The dataset container (e.g., FixedDatasetFromGenerator or FixedSampleInfoDataset).
+        batch_size: Number of samples per batch.
+        shuffle: Whether to shuffle the dataset.
+        collate_fn: A function to apply to each batch (e.g., add extra outputs like regularization weights).
+
+    Returns:
+        tf.data.Dataset: Configured for batching, shuffling, and custom transformations.
+    """
+    def generator():
+        for item in dataset:
+            yield item
+
+    dummy_input_tensors = next(generator())
+
+    output_signature = get_tensor_spec(dummy_input_tensors)
+
+    tf_dataset = tf.data.Dataset.from_generator(
+        generator,
+        output_signature=output_signature
+    )
+
+    if shuffle:
+        tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))
+
+    tf_dataset = tf_dataset.batch(batch_size)
+
+    # Apply collate function if provided
+    if collate_fn:
+        tf_dataset = tf_dataset.map(lambda *args: collate_fn(args))
+
+    return tf_dataset
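As a quick orientation (not part of the diff): a minimal usage sketch of the new data utilities above, assuming data_gen_to_dataloader, TFDatasetFromGenerator and FixedTFDataset from the module above are in scope; the representative generator and the batch sizes are invented for illustration.

    import numpy as np
    import tensorflow as tf

    # Hypothetical representative data generator: yields lists of batched arrays.
    def representative_data_gen():
        for _ in range(3):
            yield [np.random.randn(8, 32, 32, 3).astype(np.float32)]

    # Flatten the generator into single samples and re-batch to a new batch size.
    dataloader = data_gen_to_dataloader(representative_data_gen, batch_size=4)
    for batch in dataloader:
        print(batch[0].shape)  # (4, 32, 32, 3): 24 flattened samples re-batched into 6 batches of 4

The same flattened samples can instead be materialized in memory with FixedTFDataset(representative_data_gen, n_samples=16) when a fixed subset is needed.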
@@ -0,0 +1,162 @@
+# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from typing import List
+
+import tensorflow as tf
+from tqdm import tqdm
+import numpy as np
+
+from model_compression_toolkit.constants import MIN_HESSIAN_ITER, HESSIAN_COMP_TOLERANCE, \
+    HESSIAN_NUM_ITERATIONS
+from model_compression_toolkit.core.common import Graph
+from model_compression_toolkit.core.common.hessian import HessianScoresRequest, HessianScoresGranularity
+from model_compression_toolkit.core.keras.back2framework.float_model_builder import FloatKerasModelBuilder
+from model_compression_toolkit.core.keras.hessian.hessian_scores_calculator_keras import HessianScoresCalculatorKeras
+from model_compression_toolkit.logger import Logger
+
+
+class ActivationHessianScoresCalculatorKeras(HessianScoresCalculatorKeras):
+    """
+    Keras implementation of the Hessian-approximation scores Calculator for activations.
+    """
+    def __init__(self,
+                 graph: Graph,
+                 input_images: List[tf.Tensor],
+                 fw_impl,
+                 hessian_scores_request: HessianScoresRequest,
+                 num_iterations_for_approximation: int = HESSIAN_NUM_ITERATIONS):
+        """
+        Args:
+            graph: Computational graph for the float model.
+            input_images: List of input images for the computation.
+            fw_impl: Framework-specific implementation for Hessian approximation scores computation.
+            hessian_scores_request: Configuration request for which to compute the Hessian approximation scores.
+            num_iterations_for_approximation: Number of iterations to use when approximating the Hessian scores.
+
+        """
+        super(ActivationHessianScoresCalculatorKeras, self).__init__(graph=graph,
+                                                                     input_images=input_images,
+                                                                     fw_impl=fw_impl,
+                                                                     hessian_scores_request=hessian_scores_request,
+                                                                     num_iterations_for_approximation=num_iterations_for_approximation)
+
+    def compute(self) -> List[np.ndarray]:
+        """
+        Compute the Hessian-approximation based scores w.r.t the requested target nodes' activations.
+
+        Returns:
+            List[np.ndarray]: Scores based on the Hessian-approximation for the requested nodes.
+        """
+        model_output_nodes = [ot.node for ot in self.graph.get_outputs()]
+
+        if len([n for n in self.hessian_request.target_nodes if n in model_output_nodes]) > 0:
+            Logger.critical("Trying to compute activation Hessian approximation with respect to the model output. "
+                            "This operation is not supported. "
+                            "Remove the output node from the set of node targets in the Hessian request.")
+
+        grad_model_outputs = self.hessian_request.target_nodes + model_output_nodes
+
+        # Building a model to run Hessian approximation on
+        model, _ = FloatKerasModelBuilder(graph=self.graph, append2output=grad_model_outputs).build_model()
+
+        # Record operations for automatic differentiation
+        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as g:
+            g.watch(self.input_images)
+
+            if len(self.input_images) > 1:
+                outputs = model(self.input_images)
+            else:
+                outputs = model(*self.input_images)
+
+            if len(outputs) != len(grad_model_outputs):  # pragma: no cover
+                Logger.critical(
+                    f"Model for computing activation Hessian approximation expects {len(grad_model_outputs)} "
+                    f"outputs, but got {len(outputs)} output tensors.")
+
+            # Extracting the intermediate activation tensors and the model real output.
+            # Note that we do not allow computing Hessian for output nodes, so there shouldn't be an overlap.
+            num_target_nodes = len(self.hessian_request.target_nodes)
+            # Extract activation tensors of nodes for which we want to compute Hessian
+            target_activation_tensors = outputs[:num_target_nodes]
+            # Extract the model outputs
+            output_tensors = outputs[num_target_nodes:]
+
+            # Unfold and concatenate all outputs to form a single tensor
+            output = self._concat_tensors(output_tensors)
+
+            # List to store the Hessian-approximation scores for each interest point
+            ipts_hessian_approximations = [tf.Variable([0.0], dtype=tf.float32, trainable=True)
+                                           for _ in range(len(target_activation_tensors))]
+
+            # Loop through each interest point activation tensor
+            prev_mean_results = None
+            for j in tqdm(range(self.num_iterations_for_approximation)):  # Approximation iterations
+                # Generate random tensor of 1s and -1s
+                v = self._generate_random_vectors_batch(output.shape)
+                f_v = tf.reduce_sum(v * output)
+                for i, ipt in enumerate(target_activation_tensors):  # Per Interest point activation tensor
+                    interest_point_scores = []  # List to store scores for each interest point
+                    with g.stop_recording():
+                        # Computing the approximation by getting the gradient of (output * v)
+                        hess_v = g.gradient(f_v, ipt)
+
+                        if hess_v is None:
+                            # In case we have an output node, which is an interest point, but it is not
+                            # differentiable, we consider its Hessian to be the initial value 0.
+                            continue  # pragma: no cover
+
+                        if self.hessian_request.granularity == HessianScoresGranularity.PER_TENSOR:
+                            # Mean over all dims but the batch (CXHXW for conv)
+                            hessian_approx = tf.reduce_sum(hess_v ** 2.0,
+                                                           axis=tuple(d for d in range(1, len(hess_v.shape))))
+                        elif self.hessian_request.granularity == HessianScoresGranularity.PER_ELEMENT:
+                            hessian_approx = hess_v ** 2
+                        elif self.hessian_request.granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL:
+                            axes_to_sum = tuple(d for d in range(1, len(hess_v.shape)-1))
+                            hessian_approx = tf.reduce_sum(hess_v ** 2.0, axis=axes_to_sum)

+                        else:  # pragma: no cover
+                            Logger.critical(f"{self.hessian_request.granularity} "
+                                            f"is not supported for Keras activation hessian\'s approximation scores calculator.")
+
+                        # Free gradients
+                        del hess_v
+
+                        # Update node Hessian approximation mean over random iterations
+                        ipts_hessian_approximations[i] = (j * ipts_hessian_approximations[i] + hessian_approx) / (j + 1)
+
+                # If the change to the mean approximation is insignificant (to all outputs)
+                # we stop the calculation.
+                if j > MIN_HESSIAN_ITER and prev_mean_results is not None:
+                    new_mean_res = tf.reduce_mean(tf.stack(ipts_hessian_approximations), axis=1)
+                    relative_delta_per_node = (tf.abs(new_mean_res - prev_mean_results) /
+                                               (tf.abs(new_mean_res) + 1e-6))
+                    max_delta = tf.reduce_max(relative_delta_per_node)
+                    if max_delta < HESSIAN_COMP_TOLERANCE:
+                        break
+
+                if self.hessian_request.granularity == HessianScoresGranularity.PER_TENSOR:
+                    prev_mean_results = tf.reduce_mean(tf.stack(ipts_hessian_approximations), axis=1)
+
+        # Convert results to list of numpy arrays
+        hessian_results = [h.numpy() for h in ipts_hessian_approximations]
+        # Extend the Hessian tensors shape to align with expected return type
+        # TODO: currently, only per-tensor Hessian is available for activation.
+        #  Once implementing per-channel or per-element, this alignment needs to be verified and handled separately.
+        hessian_results = [h[..., np.newaxis] for h in hessian_results]
+
+        return hessian_results
+
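The compute method above is a Hutchinson-style estimator: it projects the model output onto random sign vectors, differentiates that scalar with respect to each target activation, and keeps a running mean of the squared gradients until the relative change drops below HESSIAN_COMP_TOLERANCE. The following stripped-down sketch reproduces only that estimation loop on a toy two-layer model; all tensors and the iteration count are invented for illustration and this is not the MCT API.

    import tensorflow as tf

    x = tf.random.normal((4, 16))      # toy input batch
    w1 = tf.random.normal((16, 8))     # weights producing the "interest point" activation
    w2 = tf.random.normal((8, 2))      # weights producing the model output

    approx = tf.zeros((4,))            # running per-sample score for the activation
    for j in range(50):
        with tf.GradientTape() as g:
            act = tf.matmul(x, w1)     # target activation
            g.watch(act)
            out = tf.matmul(act, w2)   # model output
            # Rademacher probe: random +/-1 tensor with the output's shape
            v = tf.where(tf.random.uniform(out.shape, 0, 2, tf.int32) == 0, -1.0, 1.0)
            f_v = tf.reduce_sum(v * out)
        hess_v = g.gradient(f_v, act)                 # gradient of the projection w.r.t. the activation
        score = tf.reduce_sum(hess_v ** 2.0, axis=1)  # per-tensor granularity: sum over non-batch dims
        approx = (j * approx + score) / (j + 1)       # running mean over iterations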
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+from tensorflow import TensorShape

 from model_compression_toolkit.core.common.hessian.hessian_scores_calculator import HessianScoresCalculator

@@ -77,3 +78,19 @@ class HessianScoresCalculatorKeras(HessianScoresCalculator):
                 "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.")  # pragma: no cover

         return tf.concat(_r_tensors, axis=1)
+
+    def _generate_random_vectors_batch(self, shape: TensorShape) -> tf.Tensor:
+        """
+        Generate a batch of random vectors for Hutchinson estimation using Rademacher distribution.
+
+        Args:
+            shape: target shape.
+
+        Returns:
+            Random tensor.
+        """
+        v = tf.random.uniform(shape=shape, minval=0, maxval=2, dtype=tf.int32)
+        v = tf.where(v == 0, -1, 1)
+        v = tf.cast(v, tf.float32)
+        return v
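The new _generate_random_vectors_batch helper draws Rademacher probes, entries that are -1 or +1 with equal probability, the usual choice for Hutchinson-style estimation since the entries are bounded with zero mean and unit variance. A standalone check of the same three lines (plain TensorFlow, outside the class):

    import tensorflow as tf

    shape = (8, 128)
    v = tf.random.uniform(shape=shape, minval=0, maxval=2, dtype=tf.int32)  # 0 or 1
    v = tf.cast(tf.where(v == 0, -1, 1), tf.float32)                        # map to -1 / +1

    print(tf.reduce_min(v).numpy(), tf.reduce_max(v).numpy())  # -1.0 1.0
    print(tf.reduce_mean(v).numpy())                           # close to 0 for a large tensor

The weights calculator hunk below replaces tf.random.normal with this shared helper, so both Hessian estimators now use the same probe distribution.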
@@ -89,8 +89,7 @@ class WeightsHessianScoresCalculatorKeras(HessianScoresCalculatorKeras):
         prev_mean_results = None
         tensors_original_shape = []
         for j in tqdm(range(self.num_iterations_for_approximation)):  # Approximation iterations
-
-            v = tf.random.normal(shape=output.shape)
+            v = self._generate_random_vectors_batch(output.shape)
             f_v = tf.reduce_sum(v * output)

             for i, ipt_node in enumerate(self.hessian_request.target_nodes):  # Per Interest point weights tensor
@@ -27,7 +27,11 @@ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR
 from model_compression_toolkit.gptq.common.gptq_framework_implementation import GPTQFrameworkImplemantation
 from model_compression_toolkit.gptq.common.gptq_graph import get_compare_points
+from model_compression_toolkit.gptq.common.gradual_activation_quantization import \
+    get_gradual_activation_quantizer_wrapper_factory
+from model_compression_toolkit.gptq.common.regularization_factory import get_regularization
 from model_compression_toolkit.logger import Logger
+from model_compression_toolkit.trainable_infrastructure.common.util import get_total_grad_steps


 class GPTQTrainer(ABC):
@@ -64,6 +68,14 @@ class GPTQTrainer(ABC):
         self.fw_impl = fw_impl
         self.fw_info = fw_info
         self.representative_data_gen_fn = representative_data_gen_fn
+
+        def _get_total_grad_steps():
+            return get_total_grad_steps(representative_data_gen_fn) * gptq_config.n_epochs
+
+        self.gradual_act_quantizer_wrapper_factory = get_gradual_activation_quantizer_wrapper_factory(gptq_config,
+                                                                                                      _get_total_grad_steps,
+                                                                                                      self.fw_linear_annealing_scheduler)
+
         # ----------------------------------------------
         # Build two models and create compare nodes
         # ----------------------------------------------
@@ -81,6 +93,52 @@ class GPTQTrainer(ABC):
                             f"an 'HessianInfoService' object must be provided, but received: {hessian_info_service}.")  # pragma: no cover
         self.hessian_service = hessian_info_service

+        self.reg_func = get_regularization(self.gptq_config,
+                                           _get_total_grad_steps,
+                                           self.fw_soft_quantizer_regularization,
+                                           self.fw_linear_annealing_scheduler)
+        self.loss_list = []
+        self.input_scale = 1
+        if self.float_user_info.input_scale != self.gptq_user_info.input_scale:
+            Logger.critical("Input scale mismatch between float and GPTQ networks. "
+                            "Ensure both networks have matching input scales.")  # pragma: no cover
+        else:
+            self.input_scale = self.gptq_user_info.input_scale
+
+        trainable_weights, trainable_bias, trainable_threshold = self.fw_get_gptq_trainable_parameters_fn(
+            self.fxp_model,
+            add_bias=self.gptq_config.train_bias)
+        self.flp_weights_list, self.fxp_weights_list = self.fw_get_weights_for_loss_fn(self.fxp_model)
+
+        if not (len(self.compare_points) == len(trainable_weights) == len(self.flp_weights_list) == len(
+                self.fxp_weights_list)):
+            Logger.critical("Mismatch in the number of comparison points, layers with trainable weights, "
+                            "and the number of float and quantized weights for loss calculation. "
+                            "Ensure all these elements align to proceed with GPTQ training.")
+
+        # In Keras we need to flatten the weights first before attaching the optimizer
+        if len(trainable_weights) > 0 and isinstance(trainable_weights[0], (list, tuple)):
+            trainable_weights = [w for layer_weights in trainable_weights for w in layer_weights]
+        if len(trainable_bias) > 0 and isinstance(trainable_bias[0], (list, tuple)):
+            trainable_bias = [w for layer_weights in trainable_bias for w in layer_weights]
+
+        self.optimizer_with_param = self.get_optimizer_with_param(trainable_weights,
+                                                                  trainable_bias,
+                                                                  trainable_threshold)
+        hessian_cfg = self.gptq_config.hessian_weights_config
+
+        self.has_params_to_train = np.sum(
+            [len(optimizer_params_tuple[1]) for optimizer_params_tuple in self.optimizer_with_param]) > 0
+        self.use_sample_layer_attention = hessian_cfg and hessian_cfg.per_sample
+
+        if self.use_sample_layer_attention:
+            # normalization is currently not supported, make sure the config reflects it.
+            if hessian_cfg.norm_scores or hessian_cfg.log_norm or hessian_cfg.scale_log_norm:
+                raise NotImplementedError()
+            self.train_dataloader = self._prepare_train_dataloader_sla(representative_data_gen_fn)
+        else:
+            self.train_dataloader = self._prepare_train_dataloader_for_non_sla(representative_data_gen_fn)
+
     def get_optimizer_with_param(self,
                                  flattened_trainable_weights: List[Any],
                                  flattened_bias_weights: List[Any],
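One Keras-specific detail in the block above: the per-layer trainable weights arrive as a list of lists (one inner list per layer) and are flattened into a single list before being paired with the optimizer. A tiny illustration of that reshaping, using dummy placeholder strings instead of real tf.Variable objects:

    # Hypothetical per-layer trainable weights (one inner list per wrapped layer).
    trainable_weights = [["w1_kernel", "w1_aux"], ["w2_kernel"], ["w3_kernel", "w3_aux"]]

    # Same flattening as in the hunk above: one flat list for the optimizer.
    if len(trainable_weights) > 0 and isinstance(trainable_weights[0], (list, tuple)):
        trainable_weights = [w for layer_weights in trainable_weights for w in layer_weights]

    print(trainable_weights)  # ['w1_kernel', 'w1_aux', 'w2_kernel', 'w3_kernel', 'w3_aux']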
@@ -13,9 +13,8 @@
 # limitations under the License.
 # ==============================================================================

-from typing import Any, Tuple, List
-
 import tensorflow as tf
+from typing import List, Tuple


 def mse_loss(y: tf.Tensor, x: tf.Tensor, normalized: bool = True) -> tf.Tensor:
@@ -67,6 +66,40 @@ def multiple_tensors_mse_loss(y_list: List[tf.Tensor],
     else:
         return tf.reduce_mean(tf.stack(loss_values_list))

+def sample_layer_attention_loss(y_list: List[tf.Tensor],
+                                x_list: List[tf.Tensor],
+                                fxp_w_list,
+                                flp_w_list,
+                                act_bn_mean,
+                                act_bn_std,
+                                loss_weights: Tuple[tf.Tensor]) -> tf.Tensor:
+    """
+    Compute Sample Layer Attention loss between two lists of tensors using TensorFlow.
+
+    Args:
+        y_list: First list of tensors.
+        x_list: Second list of tensors.
+        fxp_w_list, flp_w_list, act_bn_mean, act_bn_std: unused (needed to comply with the interface).
+        loss_weights: layer-sample attention scores (a tuple of the same length as the number of layers, where each element is a tf.Tensor vector whose length equals the number of samples).
+
+    Returns:
+        Sample Layer Attention loss (a scalar).
+    """
+    loss = 0
+    layers_mean_w = []
+    loss_weights = tf.stack(loss_weights, axis=1)
+
+    for i, (y, x) in enumerate(zip(y_list, x_list)):
+        norm = tf.reduce_sum(tf.square(y - x), axis=1)
+        if len(norm.shape) > 1:
+            norm = tf.reduce_mean(tf.reshape(norm, [norm.shape[0], -1]), axis=1)
+        w = loss_weights[:, i]
+        loss += tf.reduce_mean(w * norm)
+        layers_mean_w.append(tf.reduce_mean(w))
+
+    loss = loss / tf.reduce_max(tf.stack(layers_mean_w))
+    return loss
+

 def mse_loss_per_tensor(y: tf.Tensor,
                         x: tf.Tensor,