mct-nightly 2.2.0.20241230.534__tar.gz → 2.2.0.20250102.111338__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
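For context, a minimal way to confirm which of the two builds named above is present in a given environment is to query the installed distribution metadata. The snippet below is an illustrative sketch only (it is not part of the diff) and assumes `mct-nightly` has been installed from PyPI; it uses nothing beyond the Python standard library.

```python
# Illustrative sketch: report which mct-nightly build is installed
# (assumes the package was installed from PyPI; not part of this diff).
from importlib.metadata import PackageNotFoundError, version

try:
    installed = version("mct-nightly")  # e.g. "2.2.0.20250102.111338"
    print(f"mct-nightly build: {installed}")
except PackageNotFoundError:
    print("mct-nightly is not installed in this environment")
```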
- mct_nightly-2.2.0.20250102.111338/PKG-INFO +232 -0
- mct_nightly-2.2.0.20250102.111338/mct_nightly.egg-info/PKG-INFO +232 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/mct_nightly.egg-info/SOURCES.txt +2 -1
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/mct_nightly.egg-info/requires.txt +1 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/schema/v1.py +308 -173
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +22 -22
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +22 -22
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +22 -22
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +21 -21
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +22 -22
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +25 -25
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +23 -23
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +55 -40
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +4 -6
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +2 -4
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +10 -10
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +49 -46
- mct_nightly-2.2.0.20250102.111338/tests/test_suite.py +202 -0
- mct-nightly-2.2.0.20241230.534/PKG-INFO +0 -220
- mct-nightly-2.2.0.20241230.534/mct_nightly.egg-info/PKG-INFO +0 -220
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/README.md +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/apply_activation_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/data_util.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/statistics_correction/keras_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_linear.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/statistics_correction/pytorch_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/gradual_activation_quantization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/common/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/schema/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/common/util.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20241230.534 → mct_nightly-2.2.0.20250102.111338}/setup.py +0 -0
@@ -0,0 +1,232 @@
Metadata-Version: 2.1
Name: mct-nightly
Version: 2.2.0.20250102.111338
Summary: A Model Compression Toolkit for neural networks
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.6
Description-Content-Type: text/markdown
License-File: LICENSE.md
Requires-Dist: networkx!=2.8.1
Requires-Dist: tqdm
Requires-Dist: Pillow
Requires-Dist: numpy<2.0
Requires-Dist: opencv-python
Requires-Dist: scikit-image
Requires-Dist: scikit-learn
Requires-Dist: tensorboard
Requires-Dist: PuLP
Requires-Dist: matplotlib<3.10.0
Requires-Dist: scipy
Requires-Dist: protobuf
Requires-Dist: mct-quantizers==1.5.2
Requires-Dist: pydantic<2.0

<div align="center" markdown="1">
<p>
<a href="https://sony.github.io/model_optimization/" target="_blank">
<img src="/docsrc/images/mctHeader1-cropped.svg" width="1000"></a>
</p>

______________________________________________________________________

</div>
<div align="center">
<p align="center">
<a href="#getting-started">Getting Started</a> •
<a href="#tutorials-and-examples">Tutorials</a> •
<a href="#high-level-features-and-techniques">High level features and techniques</a> •
<a href="#resources">Resources</a> •
<a href="#contributions">Community</a> •
<a href="#license">License</a>
</p>
<p align="center">
<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/pytorch-2.1%20%7C%202.2%20%7C%202.3-blue" /></a>
<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/TensorFlow-2.12%20%7C%202.13%20%7C%202.14%20%7C%202.15-blue" /></a>
<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/python-3.9%20%7C3.10%20%7C3.11-blue" /></a>
<a href="https://github.com/sony/model_optimization/releases"><img src="https://img.shields.io/github/v/release/sony/model_optimization" /></a>
<a href="https://github.com/sony/model_optimization/blob/main/LICENSE.md"><img src="https://img.shields.io/badge/license-Apache%202.0-blue" /></a>

</p>
</div>

__________________________________________________________________________________________________________

## <div align="center">Getting Started</div>
### Quick Installation
Pip install the Model Compression Toolkit package in a Python>=3.9 environment with PyTorch>=2.1 or TensorFlow>=2.12.
```
pip install model-compression-toolkit
```
For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/sony/model_optimization/blob/main/INSTALLATION.md).

**Important note**: In order to use MCT, you’ll need to provide a floating point .pt or .keras model as an input.

### Tutorials and Examples

Our [tutorials](https://github.com/sony/model_optimization/blob/main/tutorials/README.md) section will walk you through the basics of the MCT tool, covering various compression techniques for both Keras and PyTorch models.
Access interactive notebooks for hands-on learning with popular models/tasks or move on to the [Resources](#resources) section.

### Supported Quantization Methods
MCT supports the quantization methods listed below.
<div align="center">
<p align="center">

Quantization Method | Complexity | Computational Cost | API | Tutorial
-------------------- | -----------|--------------------|---------|--------
PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb"><img src="https://img.shields.io/badge/Pytorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_gradient_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_gradient_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/PyTorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>

</p>
</div>

For each flow, **Quantization core** utilizes various algorithms and hyper-parameters for optimal [hardware-aware](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md) quantization results.
For further details, please see [Supported features and algorithms](#high-level-features-and-techniques).

**Required input**: Floating point model - a 32-bit model in either .pt or .keras format

**Optional input**: Representative dataset - can be either provided by the user, or generated utilizing the [Data Generation](#data-generation-) capability

<div align="center">
<p align="center">

<img src="/docsrc/images/mctDiagram_clean.svg" width="800">
</p>
</div>

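For illustration, a minimal PTQ call on a PyTorch model might look like the sketch below. The facade name and return values follow the PTQ API docs linked in the table above; the randomly initialized model and random representative dataset are placeholders only, so the sketch stays self-contained.

```python
# A minimal PTQ sketch (illustrative; see the PyTorch/Keras API docs linked above).
import torch
import model_compression_toolkit as mct
from torchvision.models import mobilenet_v2

float_model = mobilenet_v2()  # randomly initialized here just to keep the sketch self-contained

def representative_data_gen():
    # Yield a few batches that mimic the deployment data distribution;
    # random tensors are used only so the sketch runs without a dataset.
    for _ in range(10):
        yield [torch.randn(1, 3, 224, 224)]

# Returns the quantized model together with quantization info.
quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model, representative_data_gen)
```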
## <div align="center">High level features and techniques</div>

MCT offers a range of powerful features to optimize models for efficient edge deployment. These supported features include:

### Quantization Core Features

🏆 **Mixed-precision search** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb). Assigning optimal quantization bit-width per layer (for weights/activations).

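A sketch of how the mixed-precision search can be driven by a resource budget is shown below. It reuses `float_model` and `representative_data_gen` from the PTQ sketch above; the names `CoreConfig`, `MixedPrecisionQuantizationConfig`, and `ResourceUtilization` are assumed to match the current core API, and the 75% budget is arbitrary.

```python
# Mixed-precision sketch: let MCT search per-layer bit-widths under a weights-memory budget.
import model_compression_toolkit as mct

# Enable the mixed-precision search through the core config (argument names are assumptions).
core_config = mct.core.CoreConfig(
    mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=32))

# Budget example: 75% of the memory an 8-bit (1 byte per parameter) model would need.
weights_memory_8bit = sum(p.numel() for p in float_model.parameters())
target = mct.core.ResourceUtilization(weights_memory=0.75 * weights_memory_8bit)

quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model,
    representative_data_gen,
    target_resource_utilization=target,
    core_config=core_config)
```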
📈 **Graph optimizations**.
Transforming the model to be best fitted for the quantization process.

🔎 **Quantization parameter search** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb). Minimizing expected quantization noise during the threshold search using methods such as MSE, No-Clipping and MAE.

🧮 **Advanced quantization algorithms** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb). For advanced cases, quantization performance can be further enhanced with algorithms such as shift negative correction, outlier filtering and clustering.
__________________________________________________________________________________________________________
### Hardware-aware optimization

🎯 **TPC (Target Platform Capabilities)**. Describes the target hardware’s constraints, for which the model optimization is targeted. See the [TPC Readme](./model_compression_toolkit/target_platform_capabilities/README.md) for more information.
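For example, a specific TPC can be attached to the quantization call roughly as follows. The `imx500` flavor mirrors the bundled TPC models listed in this package diff (imx500, tflite, qnnpack), while the helper name and version string are assumptions based on the bundled `get_target_platform_capabilities` module. It reuses `float_model` and `representative_data_gen` from the PTQ sketch above.

```python
# Hardware-aware sketch: quantize against one of the bundled TPC models.
import model_compression_toolkit as mct

# Assumed helper; the 'v1' version string is illustrative.
tpc = mct.get_target_platform_capabilities('pytorch', 'imx500', target_platform_version='v1')

quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model,
    representative_data_gen,
    target_platform_capabilities=tpc)
```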
__________________________________________________________________________________________________________
### Data-free quantization (Data Generation) [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb)
Generates synthetic images based on the statistics stored in the model's batch normalization layers, tailored to your specific needs, for cases where image data isn’t available. See the [Data Generation Library](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/data_generation/README.md) for more.
The specifications of the method are detailed in the paper: _"**Data Generation for Hardware-Friendly Post-Training Quantization**"_ [5].
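A rough sketch of the data-generation flow is shown below; the facade and argument names are assumptions based on the Data Generation README and may differ between MCT versions. `float_model` is the same floating-point model used in the sketches above.

```python
# Data-free sketch: synthesize calibration images from the model's BatchNorm statistics.
import model_compression_toolkit as mct

# Assumed experimental PyTorch data-generation facade (names may differ per MCT version).
data_gen_config = mct.data_generation.get_pytorch_data_generation_config()
generated_images = mct.data_generation.pytorch_data_generation_experimental(
    model=float_model,
    n_images=64,
    output_image_size=224,
    data_generation_config=data_gen_config)

def representative_data_gen():
    # Use the synthetic images in place of a real representative dataset.
    for image in generated_images:
        yield [image]
```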
__________________________________________________________________________________________________________
### Structured Pruning [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_pruning_mnist.ipynb)
Reduces model size/complexity and ensures better channel utilization by removing redundant input channels from layers and reconstructing layer weights. Read more ([PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_pruning_experimental.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_pruning_experimental.html)).
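A sketch of the pruning flow, assuming the experimental PyTorch pruning facade from the API docs linked above and a resource-utilization target that halves the dense weights memory. It reuses `float_model` and `representative_data_gen` from the PTQ sketch above.

```python
# Structured-pruning sketch: target ~50% of the dense weights memory
# (compare the 50% pruning results reported below).
import model_compression_toolkit as mct

dense_weights_bytes = sum(p.numel() for p in float_model.parameters()) * 4  # float32 = 4 bytes/param
target = mct.core.ResourceUtilization(weights_memory=0.5 * dense_weights_bytes)

pruned_model, pruning_info = mct.pruning.pytorch_pruning_experimental(
    model=float_model,
    target_resource_utilization=target,
    representative_data_gen=representative_data_gen)
```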
__________________________________________________________________________________________________________
### Debugging and Visualization
**🎛️ Network Editor (Modify Quantization Configurations)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_network_editor.ipynb).
Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g., adjust a layer's bit-width) using MCT’s network editor.

**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/guidelines/visualization.html).

**🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model at key points of the model, and can be visualized using TensorBoard.
__________________________________________________________________________________________________________
### Enhanced Post-Training Quantization (EPTQ)
As part of the GPTQ capability, we provide an advanced optimization algorithm called EPTQ.
The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhanced Post-Training Quantization via Hessian-guided Network-wise Optimization**"_ [4].
More details on how to use EPTQ via MCT can be found in the [GPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).

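For illustration, the gradient-based flow (under which EPTQ runs) can be invoked roughly as follows; the helper and facade names follow the GPTQ API docs linked above, with argument names treated as illustrative. It reuses `float_model` and `representative_data_gen` from the PTQ sketch above.

```python
# GPTQ/EPTQ sketch: gradient-based fine-tuning of the quantized parameters.
import model_compression_toolkit as mct

# Assumed helper for a default GPTQ configuration; n_epochs controls the fine-tuning budget.
gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)

quantized_model, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization(
    float_model,
    representative_data_gen,
    gptq_config=gptq_config)
```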
## <div align="center">Resources</div>
* The [User Guide](https://sony.github.io/model_optimization/index.html) contains detailed information about MCT and guides you from installation through optimizing models for your edge AI applications.

* MCT's [API Docs](https://sony.github.io/model_optimization/api/api_docs/) are organized per quantization method:

  * [Post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#ptq) | PTQ API docs
  * [Gradient-based post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#gptq) | GPTQ API docs
  * [Quantization-aware training](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | QAT API docs

* [Debug](https://sony.github.io/model_optimization/guidelines/visualization.html) – modify the optimization process or generate an explainable report

* [Release notes](https://github.com/sony/model_optimization/releases)


## <div align="center">Supported Versions</div>

Currently, MCT is being tested on various Python, PyTorch and TensorFlow versions:
<details id="supported-versions">
<summary>Supported Versions Table</summary>

| | PyTorch 2.2 | PyTorch 2.3 | PyTorch 2.4 | PyTorch 2.5 |
|-------------|-------------|-------------|-------------|-------------|
| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch25.yml) |
| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch25.yml) |
| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch25.yml) |
| Python 3.12 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch25.yml) |

| | TensorFlow 2.12 | TensorFlow 2.13 | TensorFlow 2.14 | TensorFlow 2.15 |
|-------------|-----------------|-----------------|-----------------|-----------------|
| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras215.yml) |
| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras215.yml) |
| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras215.yml) |

</details>

## <div align="center">Results</div>

<p align="center">
<img src="/docsrc/images/Classification.png" width="200">
<img src="/docsrc/images/SemSeg.png" width="200">
<img src="/docsrc/images/PoseEst.png" width="200">
<img src="/docsrc/images/ObjDet.png" width="200">

MCT can quantize an existing 32-bit floating-point model to an 8-bit fixed-point (or less) model without compromising accuracy.
Below is a graph of [MobileNetV2](https://pytorch.org/vision/main/models/generated/torchvision.models.mobilenet_v2.html) accuracy on ImageNet vs average bit-width of weights (X-axis), using **single-precision** quantization, **mixed-precision** quantization, and mixed-precision quantization with GPTQ.

<p align="center">
<img src="/docsrc/images/torch_mobilenetv2.png" width="800">

For more results, please see [1].


### Pruning Results

Results for applying pruning to reduce the parameters of the following models by 50%:

| Model | Dense Model Accuracy | Pruned Model Accuracy |
|-----------------|----------------------|-----------------------|
| ResNet50 [2] | 75.1 | 72.4 |
| DenseNet121 [3] | 74.44 | 71.71 |

## <div align="center">Troubleshooting and Community</div>

If you encounter a large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
guide for common pitfalls and some tools to improve the quantized model's accuracy.

Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md) for common issues.

You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under the [discussions section](https://github.com/sony/model_optimization/discussions).


## <div align="center">Contributions</div>
We'd love your input! MCT would not be possible without help from our community, and we welcome contributions from anyone!

Check out our [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md) for more details.

Thank you 🙏 to all our contributors!

## <div align="center">License</div>
MCT is licensed under Apache License Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.

<a href="https://github.com/sony/model_optimization/blob/main/LICENSE.md"><img src="https://img.shields.io/badge/license-Apache%202.0-blue" /></a>

## <div align="center">References</div>

[1] Habi, H.V., Peretz, R., Cohen, E., Dikstein, L., Dror, O., Diamant, I., Jennings, R.H. and Netzer, A., 2021. [HPTQ: Hardware-Friendly Post Training Quantization. arXiv preprint](https://arxiv.org/abs/2109.09113).

[2] [Keras Applications](https://keras.io/api/applications/)

[3] [torchvision.models](https://pytorch.org/vision/stable/models.html)

[4] Gordon, O., Cohen, E., Habi, H. V., & Netzer, A., 2024. [EPTQ: Enhanced Post-Training Quantization via Hessian-guided Network-wise Optimization, European Conference on Computer Vision Workshop 2024, Computational Aspects of Deep Learning (CADL)](https://arxiv.org/abs/2309.11531).

[5] Dikstein, L., Lapid, A., Netzer, A., & Habi, H. V., 2024. [Data Generation for Hardware-Friendly Post-Training Quantization, Accepted to IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2025](https://arxiv.org/abs/2410.22110).
@@ -0,0 +1,232 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: mct-nightly
|
3
|
+
Version: 2.2.0.20250102.111338
|
4
|
+
Summary: A Model Compression Toolkit for neural networks
|
5
|
+
Classifier: Programming Language :: Python :: 3
|
6
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
7
|
+
Classifier: Operating System :: OS Independent
|
8
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
9
|
+
Requires-Python: >=3.6
|
10
|
+
Description-Content-Type: text/markdown
|
11
|
+
License-File: LICENSE.md
|
12
|
+
Requires-Dist: networkx!=2.8.1
|
13
|
+
Requires-Dist: tqdm
|
14
|
+
Requires-Dist: Pillow
|
15
|
+
Requires-Dist: numpy<2.0
|
16
|
+
Requires-Dist: opencv-python
|
17
|
+
Requires-Dist: scikit-image
|
18
|
+
Requires-Dist: scikit-learn
|
19
|
+
Requires-Dist: tensorboard
|
20
|
+
Requires-Dist: PuLP
|
21
|
+
Requires-Dist: matplotlib<3.10.0
|
22
|
+
Requires-Dist: scipy
|
23
|
+
Requires-Dist: protobuf
|
24
|
+
Requires-Dist: mct-quantizers==1.5.2
|
25
|
+
Requires-Dist: pydantic<2.0
|
26
|
+
|
+<div align="center" markdown="1">
+<p>
+<a href="https://sony.github.io/model_optimization/" target="_blank">
+<img src="/docsrc/images/mctHeader1-cropped.svg" width="1000"></a>
+</p>
+
+______________________________________________________________________
+
+</div>
+<div align="center">
+<p align="center">
+<a href="#getting-started">Getting Started</a> •
+<a href="#tutorials-and-examples">Tutorials</a> •
+<a href="#high-level-features-and-techniques">High level features and techniques</a> •
+<a href="#resources">Resources</a> •
+<a href="#contributions">Community</a> •
+<a href="#license">License</a>
+</p>
+<p align="center">
+<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/pytorch-2.1%20%7C%202.2%20%7C%202.3-blue" /></a>
+<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/TensorFlow-2.12%20%7C%202.13%20%7C%202.14%20%7C%202.15-blue" /></a>
+<a href="https://sony.github.io/model_optimization#prerequisites"><img src="https://img.shields.io/badge/python-3.9%20%7C3.10%20%7C3.11-blue" /></a>
+<a href="https://github.com/sony/model_optimization/releases"><img src="https://img.shields.io/github/v/release/sony/model_optimization" /></a>
+<a href="https://github.com/sony/model_optimization/blob/main/LICENSE.md"><img src="https://img.shields.io/badge/license-Apache%202.0-blue" /></a>
+
+</p>
+</div>
+
+__________________________________________________________________________________________________________
+
+## <div align="center">Getting Started</div>
+### Quick Installation
+Pip-install the Model Compression Toolkit package in a Python>=3.9 environment with PyTorch>=2.1 or TensorFlow>=2.12.
+```
+pip install model-compression-toolkit
+```
+To install the nightly version or install from source, refer to the [installation guide](https://github.com/sony/model_optimization/blob/main/INSTALLATION.md).
+
+**Important note**: To use MCT, you must provide a floating-point .pt or .keras model as input.
+
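Because MCT takes a floating-point model as its input, a minimal sketch of preparing one might look like the following. The choice of torchvision's MobileNetV2 is purely illustrative and not required by MCT:

```python
# A minimal sketch of preparing a floating-point PyTorch model as MCT input.
# Using torchvision's MobileNetV2 is an illustrative choice, not a requirement.
import torch
from torchvision.models import mobilenet_v2, MobileNet_V2_Weights

float_model = mobilenet_v2(weights=MobileNet_V2_Weights.IMAGENET1K_V2)
float_model.eval()  # MCT quantizes an inference-mode, 32-bit floating-point model

# Optionally save it as a .pt file, one of the input formats mentioned above
torch.save(float_model, "mobilenet_v2_float.pt")
```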
+### Tutorials and Examples
+
+Our [tutorials](https://github.com/sony/model_optimization/blob/main/tutorials/README.md) section walks you through the basics of MCT, covering various compression techniques for both Keras and PyTorch models.
+Access interactive notebooks for hands-on learning with popular models and tasks, or move on to the [Resources](#resources) section.
+
+### Supported Quantization Methods
+MCT supports the quantization methods listed below.
+<div align="center">
+<p align="center">
+
+Quantization Method | Complexity | Computational Cost | API | Tutorial
+--------------------|------------|--------------------|-----|---------
+PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb"><img src="https://img.shields.io/badge/Pytorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+GPTQ (parameter fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_gradient_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_gradient_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/PyTorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+
+</p>
+</div>
+
+For each flow, the **Quantization core** utilizes various algorithms and hyper-parameters to achieve optimal [hardware-aware](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md) quantization results.
+For further details, please see [Supported features and algorithms](#high-level-features-and-techniques).
+
+**Required input**: A floating-point (32-bit) model in either .pt or .keras format
+
+**Optional input**: A representative dataset - either provided by the user or generated with the [Data Generation](#data-generation-) capability. A minimal PTQ sketch using such a dataset appears below.
+
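As a concrete illustration of the PTQ flow, here is a minimal sketch built around the `pytorch_post_training_quantization` entry point named in the API docs linked in the table above. The random representative dataset is only a stand-in for real calibration images, and the exact signature may vary slightly between MCT versions:

```python
# Minimal PTQ sketch (PyTorch flow). Random data stands in for a real
# representative dataset; exact argument names may differ between versions.
import numpy as np
import model_compression_toolkit as mct
from torchvision.models import mobilenet_v2

float_model = mobilenet_v2()  # any floating-point torch.nn.Module

def representative_data_gen():
    # Yield a list of model inputs per call (here: one ImageNet-like batch)
    for _ in range(20):
        yield [np.random.rand(1, 3, 224, 224).astype(np.float32)]

quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model, representative_data_gen)
```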
+<div align="center">
+<p align="center">
+
+<img src="/docsrc/images/mctDiagram_clean.svg" width="800">
+</p>
+</div>
+
+## <div align="center">High level features and techniques</div>
+
+MCT offers a range of powerful features for optimizing models for efficient edge deployment. The supported features include:
+
+### Quantization Core Features
+
+🏆 **Mixed-precision search** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb). Assigning an optimal quantization bit-width per layer (for weights/activations); a short sketch appears at the end of this subsection.
+
+📈 **Graph optimizations**.
+Transforming the model so that it is best suited to the quantization process.
+
+🔎 **Quantization parameter search** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb). Minimizing expected quantization noise during threshold search using methods such as MSE, No-Clipping, and MAE.
+
+🧮 **Advanced quantization algorithms** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb). Enhancing quantization performance in challenging cases with algorithms such as shift negative correction, outlier filtering, and clustering.
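To make the mixed-precision search concrete, here is a rough sketch that reuses `float_model` and `representative_data_gen` from the PTQ sketch earlier. The `CoreConfig`, `MixedPrecisionQuantizationConfig`, and `ResourceUtilization` names and the keyword arguments are assumptions that may differ between MCT versions:

```python
# Sketch of mixed-precision PTQ: give MCT a weights-memory budget and let it
# search a per-layer bit-width assignment. Class and argument names are
# assumptions and may differ between MCT versions.
import model_compression_toolkit as mct

core_config = mct.core.CoreConfig(
    mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig())

# Example budget: ~75% of the float model's weights memory (float32 = 4 bytes)
float_weights_bytes = sum(p.numel() for p in float_model.parameters()) * 4
target_ru = mct.core.ResourceUtilization(weights_memory=float_weights_bytes * 0.75)

quantized_model, info = mct.ptq.pytorch_post_training_quantization(
    float_model,
    representative_data_gen,
    target_resource_utilization=target_ru,
    core_config=core_config)
```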
+__________________________________________________________________________________________________________
+### Hardware-aware optimization
+
+🎯 **TPC (Target Platform Capabilities)**. Describes the constraints of the target hardware for which the model is optimized. See the [TPC Readme](./model_compression_toolkit/target_platform_capabilities/README.md) for more information.
+__________________________________________________________________________________________________________
+### Data-free quantization (Data Generation) [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb)
+Generates synthetic images, tailored to your specific needs, from the statistics stored in the model's batch normalization layers - useful when image data isn't available. See the [Data Generation Library](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/data_generation/README.md) for more, and a rough usage sketch below.
+The method is described in detail in the paper: _"**Data Generation for Hardware-Friendly Post-Training Quantization**"_ [5].
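A rough sketch of how data-free quantization could be wired into the flow is shown below. The `get_pytorch_data_generation_config` and `pytorch_data_generation_experimental` names, their arguments, and the returned image format are assumptions and should be checked against the Data Generation Library README:

```python
# Hypothetical sketch: generate synthetic calibration images from the model's
# batch-normalization statistics and reuse them as the representative dataset.
# All data-generation names/arguments here are assumptions, not confirmed API.
import model_compression_toolkit as mct

data_gen_config = mct.data_generation.get_pytorch_data_generation_config()

synthetic_images = mct.data_generation.pytorch_data_generation_experimental(
    model=float_model,                       # float model from the earlier sketch
    n_images=128,                            # number of synthetic images
    output_image_size=224,                   # spatial size the model expects
    data_generation_config=data_gen_config)

def synthetic_representative_data_gen():
    # Feed the generated images to PTQ in place of real calibration data
    for image in synthetic_images:
        yield [image]

quantized_model, _ = mct.ptq.pytorch_post_training_quantization(
    float_model, synthetic_representative_data_gen)
```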
+__________________________________________________________________________________________________________
+### Structured Pruning [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_pruning_mnist.ipynb)
+Reduces model size and complexity, and improves channel utilization, by removing redundant input channels from layers and reconstructing layer weights. Read more ([PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_pruning_experimental.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_pruning_experimental.html)); a short sketch follows.
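The pruning sketch below reuses `float_model` and `representative_data_gen` from the earlier sketches. The `pytorch_pruning_experimental` name comes from the API docs linked above, while the `ResourceUtilization` budget and the keyword arguments are assumptions that may differ between versions:

```python
# Sketch of structured pruning toward ~50% of the original weights memory.
# The keyword arguments are assumptions; check the pruning API docs above.
import model_compression_toolkit as mct

# Weights memory of the dense float model in bytes (float32 = 4 bytes/param)
dense_weights_bytes = sum(p.numel() for p in float_model.parameters()) * 4
target_ru = mct.core.ResourceUtilization(weights_memory=dense_weights_bytes * 0.5)

pruned_model, pruning_info = mct.pruning.pytorch_pruning_experimental(
    model=float_model,
    target_resource_utilization=target_ru,
    representative_data_gen=representative_data_gen)
```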
+__________________________________________________________________________________________________________
+### Debugging and Visualization
+**🎛️ Network Editor (Modify Quantization Configurations)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_network_editor.ipynb).
+Modify your model's quantization configuration for specific layers, or apply a custom edit rule (e.g., adjust a layer's bit-width), using MCT's network editor.
+
+**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/guidelines/visualization.html).
+
+**🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights into the quality and success of your model's quantization process. The report includes histograms and similarity metrics between the original float model and the quantized model at key points in the model, and can be visualized using TensorBoard.
+__________________________________________________________________________________________________________
+### Enhanced Post-Training Quantization (EPTQ)
+As part of the GPTQ capability, we provide an advanced optimization algorithm called EPTQ.
+The algorithm is described in detail in the paper: _"**EPTQ: Enhanced Post-Training Quantization via Hessian-guided Network-wise Optimization**"_ [4].
+More details on how to use EPTQ via MCT can be found in the [GPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md); a short GPTQ sketch follows.
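The GPTQ sketch below again reuses `float_model` and `representative_data_gen` from the PTQ sketch. The `pytorch_gradient_post_training_quantization` entry point matches the GPTQ API docs linked earlier, while `get_pytorch_gptq_config` and its arguments are assumptions that may differ between versions:

```python
# Sketch of GPTQ (gradient-based PTQ), the capability through which EPTQ is
# exposed. The config helper and its arguments are assumptions.
import model_compression_toolkit as mct

gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)

quantized_model, info = mct.gptq.pytorch_gradient_post_training_quantization(
    float_model,               # float model from the earlier sketch
    representative_data_gen,   # same representative dataset as in the PTQ sketch
    gptq_config=gptq_config)
```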
+
+## <div align="center">Resources</div>
+* [User Guide](https://sony.github.io/model_optimization/index.html) contains detailed information about MCT and guides you from installation through optimizing models for your edge AI applications.
+
+* MCT's [API Docs](https://sony.github.io/model_optimization/api/api_docs/) are organized by quantization method:
+
+  * [Post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#ptq) | PTQ API docs
+  * [Gradient-based post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#gptq) | GPTQ API docs
+  * [Quantization-aware training](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | QAT API docs
+
+* [Debug](https://sony.github.io/model_optimization/guidelines/visualization.html) – modify the optimization process or generate an explainable report
+
+* [Release notes](https://github.com/sony/model_optimization/releases)
+
+
+## <div align="center">Supported Versions</div>
+
+Currently, MCT is tested on various Python, PyTorch, and TensorFlow versions:
+<details id="supported-versions">
+<summary>Supported Versions Table</summary>
+
+|             | PyTorch 2.2 | PyTorch 2.3 | PyTorch 2.4 | PyTorch 2.5 |
+|-------------|-------------|-------------|-------------|-------------|
+| Python 3.9  | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch25.yml) |
+| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch25.yml) |
+| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch25.yml) |
+| Python 3.12 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch24.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python312_pytorch25.yml) |
+
+|             | TensorFlow 2.12 | TensorFlow 2.13 | TensorFlow 2.14 | TensorFlow 2.15 |
+|-------------|-----------------|-----------------|-----------------|-----------------|
+| Python 3.9  | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_keras215.yml) |
+| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_keras215.yml) |
+| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras212.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras213.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras214.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_keras215.yml) |
+
+</details>
+
+## <div align="center">Results</div>
+
+<p align="center">
+<img src="/docsrc/images/Classification.png" width="200">
+<img src="/docsrc/images/SemSeg.png" width="200">
+<img src="/docsrc/images/PoseEst.png" width="200">
+<img src="/docsrc/images/ObjDet.png" width="200">
+
+MCT can quantize an existing 32-bit floating-point model to an 8-bit fixed-point (or less) model without compromising accuracy.
+Below is a graph of [MobileNetV2](https://pytorch.org/vision/main/models/generated/torchvision.models.mobilenet_v2.html) accuracy on ImageNet vs. average weight bit-width (X-axis), using **single-precision** quantization, **mixed-precision** quantization, and mixed-precision quantization with GPTQ.
+
+<p align="center">
+<img src="/docsrc/images/torch_mobilenetv2.png" width="800">
+
+For more results, please see [1].
+
+
+### Pruning Results
+
+Results of applying pruning to reduce the parameters of the following models by 50%:
+
+| Model           | Dense Model Accuracy | Pruned Model Accuracy |
+|-----------------|----------------------|-----------------------|
+| ResNet50 [2]    | 75.1                 | 72.4                  |
+| DenseNet121 [3] | 74.44                | 71.71                 |
+
+## <div align="center">Troubleshooting and Community</div>
+
+If you encounter large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md) guide
+for common pitfalls and tools to improve the quantized model's accuracy.
+
+Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md) for common issues.
+
+You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under the [discussions section](https://github.com/sony/model_optimization/discussions).
+
+
+## <div align="center">Contributions</div>
+We'd love your input! MCT would not be possible without help from our community, and we welcome contributions from anyone!
+
+Check out our [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md) for more details.
+
+Thank you 🙏 to all our contributors!
+
+## <div align="center">License</div>
+MCT is licensed under Apache License Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
+
+<a href="https://github.com/sony/model_optimization/blob/main/LICENSE.md"><img src="https://img.shields.io/badge/license-Apache%202.0-blue" /></a>
+
+## <div align="center">References</div>
+
+[1] Habi, H. V., Peretz, R., Cohen, E., Dikstein, L., Dror, O., Diamant, I., Jennings, R. H., & Netzer, A., 2021. [HPTQ: Hardware-Friendly Post Training Quantization. arXiv preprint](https://arxiv.org/abs/2109.09113).
+
+[2] [Keras Applications](https://keras.io/api/applications/)
+
+[3] [torchvision.models](https://pytorch.org/vision/stable/models.html)
+
+[4] Gordon, O., Cohen, E., Habi, H. V., & Netzer, A., 2024. [EPTQ: Enhanced Post-Training Quantization via Hessian-guided Network-wise Optimization. European Conference on Computer Vision Workshop 2024, Computational Aspects of Deep Learning (CADL)](https://arxiv.org/abs/2309.11531).
+
+[5] Dikstein, L., Lapid, A., Netzer, A., & Habi, H. V., 2024. [Data Generation for Hardware-Friendly Post-Training Quantization. Accepted to IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2025](https://arxiv.org/abs/2410.22110).
@@ -568,4 +568,5 @@ model_compression_toolkit/xquant/pytorch/facade_xquant_report.py
 model_compression_toolkit/xquant/pytorch/model_analyzer.py
 model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
 model_compression_toolkit/xquant/pytorch/similarity_functions.py
-model_compression_toolkit/xquant/pytorch/tensorboard_utils.py
+model_compression_toolkit/xquant/pytorch/tensorboard_utils.py
+tests/test_suite.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.
+__version__ = "2.2.0.20250102.111338"