mct-nightly 2.2.0.20240911.455.tar.gz → 2.2.0.20240913.457.tar.gz
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registry.
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/__init__.py +1 -1
- mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +73 -0
- mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/quantization/core_config.py +48 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/debug_config.py +12 -17
- mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/quantization/quantization_config.py +92 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +2 -2
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +19 -14
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/runner.py +4 -4
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/keras/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantization_facade.py +1 -1
- mct-nightly-2.2.0.20240911.455/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -87
- mct-nightly-2.2.0.20240911.455/model_compression_toolkit/core/common/quantization/core_config.py +0 -52
- mct-nightly-2.2.0.20240911.455/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -134
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/README.md +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/mct_nightly.egg-info/SOURCES.txt +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20240911.455 → mct-nightly-2.2.0.20240913.457}/setup.py +0 -0
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.
+__version__ = "2.2.0.20240913.000457"
mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py
ADDED
@@ -0,0 +1,73 @@
+# Copyright 2021 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from dataclasses import dataclass, field
+from typing import List, Callable, Optional
+from model_compression_toolkit.constants import MP_DEFAULT_NUM_SAMPLES, ACT_HESSIAN_DEFAULT_BATCH_SIZE
+from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
+
+
+@dataclass
+class MixedPrecisionQuantizationConfig:
+    """
+    Class with mixed precision parameters to quantize the input model.
+
+    Args:
+        compute_distance_fn (Callable): Function to compute a distance between two tensors. If None, using pre-defined distance methods based on the layer type for each layer.
+        distance_weighting_method (MpDistanceWeighting): MpDistanceWeighting enum value that provides a function to use when weighting the distances among different layers when computing the sensitivity metric.
+        num_of_images (int): Number of images to use to evaluate the sensitivity of a mixed-precision model comparing to the float model.
+        configuration_overwrite (List[int]): A list of integers that enables overwrite of mixed precision with a predefined one.
+        num_interest_points_factor (float): A multiplication factor between zero and one (represents percentage) to reduce the number of interest points used to calculate the distance metric.
+        use_hessian_based_scores (bool): Whether to use Hessian-based scores for weighted average distance metric computation.
+        norm_scores (bool): Whether to normalize the returned scores for the weighted distance metric (to get values between 0 and 1).
+        refine_mp_solution (bool): Whether to try to improve the final mixed-precision configuration using a greedy algorithm that searches layers to increase their bit-width, or not.
+        metric_normalization_threshold (float): A threshold for checking the mixed precision distance metric values, In case of values larger than this threshold, the metric will be scaled to prevent numerical issues.
+        hessian_batch_size (int): The Hessian computation batch size. used only if using mixed precision with Hessian-based objective.
+    """
+
+    compute_distance_fn: Optional[Callable] = None
+    distance_weighting_method: MpDistanceWeighting = MpDistanceWeighting.AVG
+    num_of_images: int = MP_DEFAULT_NUM_SAMPLES
+    configuration_overwrite: Optional[List[int]] = None
+    num_interest_points_factor: float = field(default=1.0, metadata={"description": "Should be between 0.0 and 1.0"})
+    use_hessian_based_scores: bool = False
+    norm_scores: bool = True
+    refine_mp_solution: bool = True
+    metric_normalization_threshold: float = 1e10
+    hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE
+    _is_mixed_precision_enabled: bool = field(init=False, default=False)
+
+    def __post_init__(self):
+        # Validate num_interest_points_factor
+        assert 0.0 < self.num_interest_points_factor <= 1.0, \
+            "num_interest_points_factor should represent a percentage of " \
+            "the base set of interest points that are required to be " \
+            "used for mixed-precision metric evaluation, " \
+            "thus, it should be between 0 to 1"
+
+    def set_mixed_precision_enable(self):
+        """
+        Set a flag in mixed precision config indicating that mixed precision is enabled.
+        """
+        self._is_mixed_precision_enabled = True
+
+    @property
+    def is_mixed_precision_enabled(self):
+        """
+        A property that indicates whether mixed precision quantization is enabled.
+
+        Returns: True if mixed precision quantization is enabled
+        """
+        return self._is_mixed_precision_enabled
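For orientation, a minimal usage sketch of the refactored dataclass above (class, fields, and methods are taken from the added file; the call site itself is illustrative and not part of this diff):

    from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig

    # __post_init__ validates that num_interest_points_factor lies in (0, 1].
    mp_cfg = MixedPrecisionQuantizationConfig(num_of_images=16, num_interest_points_factor=0.5)
    assert not mp_cfg.is_mixed_precision_enabled   # disabled until explicitly turned on
    mp_cfg.set_mixed_precision_enable()            # the core flow flips this flag when mixed precision is requested
    assert mp_cfg.is_mixed_precision_enabled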
mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/quantization/core_config.py
ADDED
@@ -0,0 +1,48 @@
+# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from dataclasses import dataclass, field
+from typing import Optional
+
+from model_compression_toolkit.core.common.quantization.bit_width_config import BitWidthConfig
+from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
+from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
+from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
+
+
+@dataclass
+class CoreConfig:
+    """
+    A dataclass to hold the configurations classes of the MCT-core.
+
+    Args:
+        quantization_config (QuantizationConfig): Config for quantization.
+        mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization.
+            If None, a default MixedPrecisionQuantizationConfig is used.
+        bit_width_config (BitWidthConfig): Config for manual bit-width selection.
+        debug_config (DebugConfig): Config for debugging and editing the network quantization process.
+    """
+
+    quantization_config: QuantizationConfig = field(default_factory=QuantizationConfig)
+    mixed_precision_config: MixedPrecisionQuantizationConfig = field(default_factory=MixedPrecisionQuantizationConfig)
+    bit_width_config: BitWidthConfig = field(default_factory=BitWidthConfig)
+    debug_config: DebugConfig = field(default_factory=DebugConfig)
+
+    @property
+    def is_mixed_precision_enabled(self) -> bool:
+        """
+        A property that indicates whether mixed precision is enabled.
+        """
+        return bool(self.mixed_precision_config and self.mixed_precision_config.is_mixed_precision_enabled)
+
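A short sketch of what the default_factory defaults buy at the call site, assuming the CoreConfig shown above (the usage itself is illustrative, not part of the diff):

    from model_compression_toolkit.core.common.quantization.core_config import CoreConfig

    cfg = CoreConfig()                          # every sub-config is built via default_factory
    assert cfg.quantization_config is not None  # no None-checks needed in __init__ anymore
    assert not cfg.is_mixed_precision_enabled   # delegates to mixed_precision_config.is_mixed_precision_enabled

    cfg.mixed_precision_config.set_mixed_precision_enable()
    assert cfg.is_mixed_precision_enabled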
@@ -13,29 +13,24 @@
 # limitations under the License.
 # ==============================================================================
 
-
+from dataclasses import dataclass, field
 from typing import List
 
 from model_compression_toolkit.core.common.network_editors.edit_network import EditRule
 
 
+@dataclass
 class DebugConfig:
     """
-    A
-    """
-    def __init__(self,
-                 analyze_similarity: bool = False,
-                 network_editor: List[EditRule] = [],
-                 simulate_scheduler: bool = False):
-        """
+    A dataclass for MCT core debug information.
 
-
+    Args:
+        analyze_similarity (bool): Whether to plot similarity figures within TensorBoard (when logger is
+            enabled) or not. Can be used to pinpoint problematic layers in the quantization process.
+        network_editor (List[EditRule]): A list of rules and actions to edit the network for quantization.
+        simulate_scheduler (bool): Simulate scheduler behavior to compute operators' order and cuts.
+    """
 
-
-
-
-        simulate_scheduler (bool): Simulate scheduler behaviour to compute operators order and cuts.
-        """
-        self.analyze_similarity = analyze_similarity
-        self.network_editor = network_editor
-        self.simulate_scheduler = simulate_scheduler
+    analyze_similarity: bool = False
+    network_editor: List[EditRule] = field(default_factory=list)
+    simulate_scheduler: bool = False
mct-nightly-2.2.0.20240913.457/model_compression_toolkit/core/common/quantization/quantization_config.py
ADDED
@@ -0,0 +1,92 @@
+# Copyright 2021 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from dataclasses import dataclass, field
+import math
+from enum import Enum
+
+from model_compression_toolkit.constants import MIN_THRESHOLD
+
+
+class QuantizationErrorMethod(Enum):
+    """
+    Method for quantization threshold selection:
+
+    NOCLIPPING - Use min/max values as thresholds.
+
+    MSE - Use mean square error for minimizing quantization noise.
+
+    MAE - Use mean absolute error for minimizing quantization noise.
+
+    KL - Use KL-divergence to make signals distributions to be similar as possible.
+
+    Lp - Use Lp-norm to minimizing quantization noise.
+
+    HMSE - Use Hessian-based mean squared error for minimizing quantization noise. This method is using Hessian scores to factorize more valuable parameters when computing the error induced by quantization.
+
+    """
+
+    NOCLIPPING = 0
+    MSE = 1
+    MAE = 2
+    KL = 4
+    LP = 5
+    HMSE = 6
+
+
+@dataclass
+class QuantizationConfig:
+    """
+    A class that encapsulates all the different parameters used by the library to quantize a model.
+
+    Examples:
+        You can create a quantization configuration to apply to a model. For example, to quantize a model's weights and
+        activations using thresholds, with weight threshold selection based on MSE and activation threshold selection
+        using NOCLIPPING (min/max), while enabling relu_bound_to_power_of_2 and weights_bias_correction,
+        you can instantiate a quantization configuration like this:
+
+        >>> import model_compression_toolkit as mct
+        >>> qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING, weights_error_method=mct.core.QuantizationErrorMethod.MSE, relu_bound_to_power_of_2=True, weights_bias_correction=True)
+
+
+        The QuantizationConfig instance can then be used in the quantization workflow,
+        such as with Keras in the function: :func:`~model_compression_toolkit.ptq.keras_post_training_quantization`.
+
+    """
+
+    activation_error_method: QuantizationErrorMethod = QuantizationErrorMethod.MSE
+    weights_error_method: QuantizationErrorMethod = QuantizationErrorMethod.MSE
+    relu_bound_to_power_of_2: bool = False
+    weights_bias_correction: bool = True
+    weights_second_moment_correction: bool = False
+    input_scaling: bool = False
+    softmax_shift: bool = False
+    shift_negative_activation_correction: bool = True
+    activation_channel_equalization: bool = False
+    z_threshold: float = math.inf
+    min_threshold: float = MIN_THRESHOLD
+    l_p_value: int = 2
+    linear_collapsing: bool = True
+    residual_collapsing: bool = True
+    shift_negative_ratio: float = 0.05
+    shift_negative_threshold_recalculation: bool = False
+    shift_negative_params_search: bool = False
+    concat_threshold_update: bool = False
+
+
+# Default quantization configuration the library use.
+DEFAULTCONFIG = QuantizationConfig(QuantizationErrorMethod.MSE, QuantizationErrorMethod.MSE,
+                                   relu_bound_to_power_of_2=False, weights_bias_correction=True,
+                                   weights_second_moment_correction=False, input_scaling=False, softmax_shift=False)
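Because QuantizationConfig is now a dataclass, standard dataclass tooling applies to it. A hedged sketch using only names defined in the added file plus the standard dataclasses module (the variant built here is illustrative):

    import dataclasses
    from model_compression_toolkit.core.common.quantization.quantization_config import (
        DEFAULTCONFIG, QuantizationConfig, QuantizationErrorMethod)

    # Derive a variant of the default config without mutating it; replace() works on dataclasses.
    hmse_cfg = dataclasses.replace(DEFAULTCONFIG, weights_error_method=QuantizationErrorMethod.HMSE)
    assert isinstance(hmse_cfg, QuantizationConfig)
    assert DEFAULTCONFIG.weights_error_method is QuantizationErrorMethod.MSE
    assert hmse_cfg.weights_error_method is QuantizationErrorMethod.HMSE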
@@ -360,7 +360,7 @@ def shift_negative_function(graph: Graph,
                             graph=graph,
                             quant_config=core_config.quantization_config,
                             tpc=graph.tpc,
-                            mixed_precision_enable=core_config.
+                            mixed_precision_enable=core_config.is_mixed_precision_enabled)
 
     for candidate_qc in pad_node.candidates_quantization_cfg:
         candidate_qc.activation_quantization_cfg.enable_activation_quantization = False
@@ -377,7 +377,7 @@ def shift_negative_function(graph: Graph,
                             graph=graph,
                             quant_config=core_config.quantization_config,
                             tpc=graph.tpc,
-                            mixed_precision_enable=core_config.
+                            mixed_precision_enable=core_config.is_mixed_precision_enabled)
 
     original_non_linear_activation_nbits = non_linear_node_cfg_candidate.activation_n_bits
     # The non-linear node's output should be float, so we approximate it by using 16bits quantization.
@@ -80,16 +80,19 @@ def _build_input_alloc_and_call_args(n: Node, input_tensors_in_node_kwargs: Dict
     tensor_input_alloc = []
     op_call_args = list(n.args)
     if inputs_as_list:
-        op_call_args
+        # input tensors are a list in the first argument -> remove from op_call_args and go over
+        # the tensors in that list.
+        _args = op_call_args.pop(0)
     else:
-
-
-
-
-
-
-
-
+        _args = n.args
+    for in_node in n.all_input_nodes:
+        # The extra for loop is used to tackle the case of the same input tensor for this node (e.g. torch.add(x, x)).
+        for i, arg in enumerate(_args):
+            if arg == in_node:
+                tensor_input_alloc.append(i)
+        for k, arg in input_tensors_in_node_kwargs.items():
+            if arg == in_node:
+                tensor_input_alloc.append(k)
 
     return op_call_args, tensor_input_alloc
 
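To illustrate why tensor_input_alloc is built with the nested loops above, here is a self-contained sketch that substitutes plain strings for the torch.fx nodes (the stand-in values are hypothetical; only the loop structure mirrors the diff):

    # Stand-ins for fx node attributes: the real code iterates n.all_input_nodes and n.args.
    all_input_nodes = ["x"]            # torch.add(x, x) has a single distinct input node
    args = ["x", "x"]                  # ...but it appears twice in the call args
    kwargs = {}                        # input tensors passed as keyword arguments

    tensor_input_alloc = []
    for in_node in all_input_nodes:
        for i, arg in enumerate(args):         # positional occurrences -> indices
            if arg == in_node:
                tensor_input_alloc.append(i)
        for k, arg in kwargs.items():          # keyword occurrences -> keyword names
            if arg == in_node:
                tensor_input_alloc.append(k)

    print(tensor_input_alloc)                  # [0, 1]: both positions receive the same input tensor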
@@ -253,11 +256,8 @@ def nodes_builder(model: GraphModule,
             node_kwargs[k] = v
 
         # Check if node's first input argument is a list of input fx nodes, such as torch.cat:
-
+        inputs_as_list = is_instance_first_arg(node, (list, tuple)) and all(
             [isinstance(n, Node) for n in node.args[0]])
-        is_placeholder_a_list = is_instance_first_arg(node, Node) and \
-            node.args[0].op == PLACEHOLDER and node.args[0].meta[TYPE] in (list, tuple)
-        inputs_as_list = is_first_input_list_of_nodes or is_placeholder_a_list
 
         # Build tensor_input_alloc required for the model builder. All input nodes are received as a list in the builder,
         # so tensor_input_alloc is used to allocate each input tensor in the correct place in the node's args & kwargs.
@@ -333,7 +333,12 @@ def edges_builder(model: GraphModule,
             if input_node in fx_node_2_graph_node:
                 # n_edges_for_input_node is for the case that the input node appears more than
                 # once as the input of the node, for example add(x, x)
-
+                if node in fx_node_2_graph_node and isinstance(fx_node_2_graph_node[node], FunctionalNode) and \
+                        fx_node_2_graph_node[node].inputs_as_list:
+                    _args = node.args[0]
+                else:
+                    _args = node.args
+                n_edges_for_input_node = sum([1 for a in _args if input_node == a])
                 n_edges_for_input_node = max(n_edges_for_input_node, 1)
 
                 dst_index = node.all_input_nodes.index(input_node)
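The counting change above matters for ops whose inputs arrive as a list in the first argument (e.g. torch.cat), where the edge count over node.args would miss the tensors inside that list. A toy sketch with hypothetical stand-in values:

    # For something like torch.cat([x, x], dim=0), the fx node's args look like ([x, x], 0):
    input_node = "x"
    node_args = (["x", "x"], 0)        # stand-in for node.args of a list-input op
    inputs_as_list = True

    _args = node_args[0] if inputs_as_list else node_args
    n_edges_for_input_node = sum([1 for a in _args if input_node == a])
    n_edges_for_input_node = max(n_edges_for_input_node, 1)
    print(n_edges_for_input_node)      # 2; counting over node_args directly would give 0, clamped to 1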
@@ -119,7 +119,7 @@ def core_runner(in_model: Any,
                 tpc,
                 core_config.bit_width_config,
                 tb_w,
-                mixed_precision_enable=core_config.
+                mixed_precision_enable=core_config.is_mixed_precision_enabled,
                 running_gptq=running_gptq)
 
     hessian_info_service = HessianInfoService(graph=graph, representative_dataset_gen=representative_data_gen,
@@ -136,7 +136,7 @@ def core_runner(in_model: Any,
     ######################################
     # Finalize bit widths
     ######################################
-    if core_config.
+    if core_config.is_mixed_precision_enabled:
         if core_config.mixed_precision_config.configuration_overwrite is None:
 
             filter_candidates_for_mixed_precision(graph, target_resource_utilization, fw_info, tpc)
@@ -161,7 +161,7 @@ def core_runner(in_model: Any,
     else:
         bit_widths_config = []
 
-    tg = set_bit_widths(core_config.
+    tg = set_bit_widths(core_config.is_mixed_precision_enabled,
                         tg,
                         bit_widths_config)
 
@@ -175,7 +175,7 @@ def core_runner(in_model: Any,
                        fw_info=fw_info,
                        fw_impl=fw_impl)
 
-    if core_config.
+    if core_config.is_mixed_precision_enabled:
         # Retrieve lists of tuples (node, node's final weights/activation bitwidth)
         weights_conf_nodes_bitwidth = tg.get_final_weights_config(fw_info)
         activation_conf_nodes_bitwidth = tg.get_final_activation_config()
@@ -199,7 +199,7 @@ if FOUND_TF:
         KerasModelValidation(model=in_model,
                              fw_info=DEFAULT_KERAS_INFO).validate()
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config for mixed-precision is not of type 'MixedPrecisionQuantizationConfig'. "
                                 "Ensure usage of the correct API for keras_post_training_quantization "
@@ -165,7 +165,7 @@ if FOUND_TORCH:
 
         """
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config for mixed-precision is not of type 'MixedPrecisionQuantizationConfig'. "
                                 "Ensure usage of the correct API for 'pytorch_gradient_post_training_quantization' "
@@ -124,7 +124,7 @@ if FOUND_TF:
         KerasModelValidation(model=in_model,
                              fw_info=fw_info).validate()
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config to mixed-precision facade is not of type "
                                 "MixedPrecisionQuantizationConfig. Please use keras_post_training_quantization "
@@ -96,7 +96,7 @@ if FOUND_TORCH:
 
         fw_info = DEFAULT_PYTORCH_INFO
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config to mixed-precision facade is not of type "
                                 "MixedPrecisionQuantizationConfig. Please use "
@@ -176,7 +176,7 @@ if FOUND_TF:
         KerasModelValidation(model=in_model,
                              fw_info=DEFAULT_KERAS_INFO).validate()
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config to mixed-precision facade is not of type "
                                 "MixedPrecisionQuantizationConfig. Please use keras_post_training_quantization API,"
@@ -145,7 +145,7 @@ if FOUND_TORCH:
                        f"If you encounter an issue, please open an issue in our GitHub "
                        f"project https://github.com/sony/model_optimization")
 
-        if core_config.
+        if core_config.is_mixed_precision_enabled:
             if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
                 Logger.critical("Given quantization config to mixed-precision facade is not of type "
                                 "MixedPrecisionQuantizationConfig. Please use pytorch_post_training_quantization API,"
mct-nightly-2.2.0.20240911.455/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py
DELETED
@@ -1,87 +0,0 @@
-# Copyright 2021 Sony Semiconductor Israel, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-from typing import List, Callable
-
-from model_compression_toolkit.constants import MP_DEFAULT_NUM_SAMPLES, ACT_HESSIAN_DEFAULT_BATCH_SIZE
-from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
-
-
-class MixedPrecisionQuantizationConfig:
-
-    def __init__(self,
-                 compute_distance_fn: Callable = None,
-                 distance_weighting_method: MpDistanceWeighting = MpDistanceWeighting.AVG,
-                 num_of_images: int = MP_DEFAULT_NUM_SAMPLES,
-                 configuration_overwrite: List[int] = None,
-                 num_interest_points_factor: float = 1.0,
-                 use_hessian_based_scores: bool = False,
-                 norm_scores: bool = True,
-                 refine_mp_solution: bool = True,
-                 metric_normalization_threshold: float = 1e10,
-                 hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE):
-        """
-        Class with mixed precision parameters to quantize the input model.
-
-        Args:
-            compute_distance_fn (Callable): Function to compute a distance between two tensors. If None, using pre-defined distance methods based on the layer type for each layer.
-            distance_weighting_method (MpDistanceWeighting): MpDistanceWeighting enum value that provides a function to use when weighting the distances among different layers when computing the sensitivity metric.
-            num_of_images (int): Number of images to use to evaluate the sensitivity of a mixed-precision model comparing to the float model.
-            configuration_overwrite (List[int]): A list of integers that enables overwrite of mixed precision with a predefined one.
-            num_interest_points_factor (float): A multiplication factor between zero and one (represents percentage) to reduce the number of interest points used to calculate the distance metric.
-            use_hessian_based_scores (bool): Whether to use Hessian-based scores for weighted average distance metric computation.
-            norm_scores (bool): Whether to normalize the returned scores for the weighted distance metric (to get values between 0 and 1).
-            refine_mp_solution (bool): Whether to try to improve the final mixed-precision configuration using a greedy algorithm that searches layers to increase their bit-width, or not.
-            metric_normalization_threshold (float): A threshold for checking the mixed precision distance metric values, In case of values larger than this threshold, the metric will be scaled to prevent numerical issues.
-            hessian_batch_size (int): The Hessian computation batch size. used only if using mixed precision with Hessian-based objective.
-
-        """
-
-        self.compute_distance_fn = compute_distance_fn
-        self.distance_weighting_method = distance_weighting_method
-        self.num_of_images = num_of_images
-        self.configuration_overwrite = configuration_overwrite
-        self.refine_mp_solution = refine_mp_solution
-
-        assert 0.0 < num_interest_points_factor <= 1.0, "num_interest_points_factor should represent a percentage of " \
-                                                        "the base set of interest points that are required to be " \
-                                                        "used for mixed-precision metric evaluation, " \
-                                                        "thus, it should be between 0 to 1"
-        self.num_interest_points_factor = num_interest_points_factor
-
-        self.use_hessian_based_scores = use_hessian_based_scores
-        self.norm_scores = norm_scores
-        self.hessian_batch_size = hessian_batch_size
-
-        self.metric_normalization_threshold = metric_normalization_threshold
-
-        self._mixed_precision_enable = False
-
-    def set_mixed_precision_enable(self):
-        """
-        Set a flag in mixed precision config indicating that mixed precision is enabled.
-        """
-
-        self._mixed_precision_enable = True
-
-    @property
-    def mixed_precision_enable(self):
-        """
-        A property that indicates whether mixed precision quantization is enabled.
-
-        Returns: True if mixed precision quantization is enabled
-
-        """
-        return self._mixed_precision_enable
mct-nightly-2.2.0.20240911.455/model_compression_toolkit/core/common/quantization/core_config.py
DELETED
@@ -1,52 +0,0 @@
-# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-from model_compression_toolkit.core.common.quantization.bit_width_config import BitWidthConfig
-from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
-from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
-from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
-
-
-class CoreConfig:
-    """
-    A class to hold the configurations classes of the MCT-core.
-    """
-    def __init__(self,
-                 quantization_config: QuantizationConfig = None,
-                 mixed_precision_config: MixedPrecisionQuantizationConfig = None,
-                 bit_width_config: BitWidthConfig = None,
-                 debug_config: DebugConfig = None
-                 ):
-        """
-
-        Args:
-            quantization_config (QuantizationConfig): Config for quantization.
-            mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization.
-                If None, a default MixedPrecisionQuantizationConfig is used.
-            bit_width_config (BitWidthConfig): Config for manual bit-width selection.
-            debug_config (DebugConfig): Config for debugging and editing the network quantization process.
-        """
-        self.quantization_config = QuantizationConfig() if quantization_config is None else quantization_config
-        self.bit_width_config = BitWidthConfig() if bit_width_config is None else bit_width_config
-        self.debug_config = DebugConfig() if debug_config is None else debug_config
-
-        if mixed_precision_config is None:
-            self.mixed_precision_config = MixedPrecisionQuantizationConfig()
-        else:
-            self.mixed_precision_config = mixed_precision_config
-
-    @property
-    def mixed_precision_enable(self):
-        return self.mixed_precision_config is not None and self.mixed_precision_config.mixed_precision_enable
-