mct-nightly 2.2.0.20241012.448.tar.gz → 2.2.0.20241018.449.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
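Most of the churn in this release is in the QAT Keras quantizers: the listing below shows a new model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers package being added, while the corresponding qat/keras/quantizer modules lose most of their lines, suggesting the trainable activation quantizers were moved out of the QAT package. As a rough illustration only, the sketch below imports one relocated module by its new path and one remaining QAT module by its unchanged path; it assumes nothing beyond the file paths that appear in the listing.

```python
# Minimal sketch (not part of the package diff) of the module relocation visible in
# the listing below: 2.2.0.20241018.449 adds a dedicated
# trainable_infrastructure/keras/activation_quantizers package, while the existing
# qat/keras/quantizer modules shrink. Only module paths taken from the listing are
# used; no exported class or function names are assumed. Running this requires
# mct-nightly (and its TensorFlow/Keras dependencies) to be installed.
import importlib

# New module added in 2.2.0.20241018.449 (see the activation_quantizers entries below).
new_ste = importlib.import_module(
    "model_compression_toolkit.trainable_infrastructure.keras.activation_quantizers.ste.symmetric_ste"
)

# Pre-existing QAT module, still present but reduced by ~119 lines in this release.
old_ste = importlib.import_module(
    "model_compression_toolkit.qat.keras.quantizer.ste_rounding.symmetric_ste"
)

print(new_ste.__name__)
print(old_ste.__name__)
```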
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/SOURCES.txt +9 -1
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/__init__.py +1 -1
- mct-nightly-2.2.0.20241012.448/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py → mct-nightly-2.2.0.20241018.449/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +3 -13
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +5 -126
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +4 -121
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +7 -6
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +4 -119
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +5 -95
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +20 -0
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +22 -0
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +127 -0
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +129 -0
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +148 -0
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +122 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +12 -10
- mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +108 -0
- mct-nightly-2.2.0.20241018.449/tests_pytest/pytorch/gptq/__init__.py +14 -0
- mct-nightly-2.2.0.20241018.449/tests_pytest/pytorch/trainable_infrastructure/__init__.py +14 -0
- mct-nightly-2.2.0.20241012.448/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -48
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/README.md +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448/model_compression_toolkit/trainable_infrastructure/pytorch → mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras}/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448/model_compression_toolkit/trainable_infrastructure/pytorch → mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/keras}/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448/tests_pytest → mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq}/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241012.448/tests_pytest/pytorch → mct-nightly-2.2.0.20241018.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste}/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/trainable_infrastructure/pytorch/util.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/setup.py +0 -0
- {mct-nightly-2.2.0.20241012.448/tests_pytest/pytorch/gptq → mct-nightly-2.2.0.20241018.449/tests_pytest}/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448/tests_pytest/pytorch/trainable_infrastructure → mct-nightly-2.2.0.20241018.449/tests_pytest/pytorch}/__init__.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/tests_pytest/pytorch/gptq/test_annealing_cfg.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/tests_pytest/pytorch/gptq/test_gradual_act_quantization.py +0 -0
- {mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py +0 -0
{mct-nightly-2.2.0.20241012.448 → mct-nightly-2.2.0.20241018.449}/mct_nightly.egg-info/SOURCES.txt
RENAMED
@@ -411,7 +411,7 @@ model_compression_toolkit/qat/common/qat_config.py
 model_compression_toolkit/qat/keras/__init__.py
 model_compression_toolkit/qat/keras/quantization_facade.py
 model_compression_toolkit/qat/keras/quantizer/__init__.py
-model_compression_toolkit/qat/keras/quantizer/
+model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py
 model_compression_toolkit/qat/keras/quantizer/quant_utils.py
 model_compression_toolkit/qat/keras/quantizer/quantization_builder.py
 model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py
@@ -514,6 +514,14 @@ model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
 model_compression_toolkit/trainable_infrastructure/keras/load_model.py
 model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py
+model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py
 model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

-__version__ = "2.2.0.
+__version__ = "2.2.0.20241018.000449"
@@ -22,24 +22,14 @@ from model_compression_toolkit.trainable_infrastructure import TrainableQuantize

 if FOUND_TF:

-    class 
+    class BaseKerasQATWeightTrainableQuantizer(BaseKerasTrainableQuantizer):
         """
         A base class for trainable Keras quantizer for QAT.
         """
-
-        def __init__(self,
-                     quantization_config: Union[TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig]):
-            """
-            Initializes BaseKerasQATTrainableQuantizer object.
-
-            Args:
-                quantization_config: quantizer config class contains all the information about a quantizer configuration.
-            """
-
-            super().__init__(quantization_config)
+        pass

 else: # pragma: no cover
-    class 
+    class BaseKerasQATWeightTrainableQuantizer(BaseKerasTrainableQuantizer):
         def __init__(self,
                      quantization_config: Union[TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig]):

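The renamed base class above is now scoped to weight quantizers only; activation quantizers move to their own hierarchy under model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers (see the hunks below). As a rough orientation, a skeleton of a custom QAT weight quantizer built on the new base class, using only the method signatures visible in this diff; the class name and the method bodies are illustrative assumptions, not MCT code:

from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig
from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import \
    BaseKerasQATWeightTrainableQuantizer


class MyWeightQATQuantizer(BaseKerasQATWeightTrainableQuantizer):
    """Hypothetical weight quantizer sketch (not part of the package)."""

    def __init__(self, quantization_config: TrainableQuantizerWeightsConfig):
        super().__init__(quantization_config)

    def initialize_quantization(self, tensor_shape, name, layer):
        # Register trainable quantization parameters on the wrapped layer,
        # e.g. via layer.add_weight(...) and self.add_quantizer_variable(...).
        raise NotImplementedError

    def __call__(self, inputs, training):
        # Return the fake-quantized weights for the forward pass.
        raise NotImplementedError

    def convert2inferable(self):
        # Return the matching inferable quantizer from mct_quantizers.
        raise NotImplementedError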
@@ -28,47 +28,18 @@ from mct_quantizers import QuantizationTarget, mark_quantizer
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C

-from model_compression_toolkit.qat.keras.quantizer.
-from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig,
-
-from mct_quantizers.keras.quantizers import WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer, \
-    ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer
+from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import BaseKerasQATWeightTrainableQuantizer
+from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig
+from mct_quantizers.keras.quantizers import WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer, ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
 from model_compression_toolkit.qat.keras.quantizer.quant_utils import ste_round, grad_scale
-
-
-def symmetric_lsq_quantizer(x: tf.Tensor,
-                            thresholds: tf.Tensor,
-                            num_bits: int,
-                            sign: bool,
-                            min_int: int,
-                            max_int:int,
-                            scale_factor: float) -> tf.Tensor:
-    """
-    Symmetric quantizer according to LSQ algorithm: https://arxiv.org/pdf/1902.08153.pdf
-    Args:
-        x: input to quantize
-        thresholds: thresholds of quantization levels
-        num_bits: number of bits for quantization
-        sign: whether x is signed or not
-        min_int: min clipping integer value
-        max_int: max clipping integer value
-        scale_factor: grad scale of LSQ algorithm
-    Returns:
-        A quantized tensor
-    """
-    delta = thresholds / (2 ** (num_bits - int(sign)))
-    delta_scaled = grad_scale(delta, scale_factor)
-    rounded = ste_round(x / delta_scaled)
-    clipped = tf.math.minimum(tf.math.maximum(rounded, min_int), max_int)
-    quantized = delta_scaled * clipped
-    return quantized
+from model_compression_toolkit.trainable_infrastructure.keras.quantizer_utils import symmetric_lsq_quantizer


 @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
                 identifier=TrainingMethod.LSQ)
-class LSQWeightQATQuantizer(
+class LSQWeightQATQuantizer(BaseKerasQATWeightTrainableQuantizer):
     """
     Trainable constrained quantizer to quantize layer's weights.
     """
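The symmetric_lsq_quantizer helper removed in this hunk now lives in model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py and is imported from there. For reference, a self-contained sketch of the same arithmetic; ste_round and grad_scale are approximated inline with tf.stop_gradient here, which is an illustrative assumption rather than MCT's exact implementation:

import tensorflow as tf


def _ste_round(x):
    # Straight-through estimator: round in the forward pass, identity gradient.
    return x + tf.stop_gradient(tf.round(x) - x)


def _grad_scale(x, scale):
    # Keep the forward value of x; only the gradient flowing back is scaled.
    x_scaled = x * scale
    return x_scaled + tf.stop_gradient(x - x_scaled)


def symmetric_lsq_sketch(x, thresholds, num_bits, sign, min_int, max_int, scale_factor):
    # Same steps as the removed symmetric_lsq_quantizer: derive the step size from the
    # threshold, scale its gradient, round with STE, then clip to the integer grid.
    delta = thresholds / (2 ** (num_bits - int(sign)))
    delta_scaled = _grad_scale(delta, scale_factor)
    rounded = _ste_round(x / delta_scaled)
    clipped = tf.math.minimum(tf.math.maximum(rounded, min_int), max_int)
    return delta_scaled * clipped


# Example: signed 8-bit quantization with a threshold of 1.0.
x = tf.constant([-1.2, -0.3, 0.0, 0.4, 0.9])
q = symmetric_lsq_sketch(x, tf.constant(1.0), num_bits=8, sign=True,
                         min_int=-128, max_int=127, scale_factor=1.0)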
@@ -159,95 +130,3 @@ class LSQWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
                                               input_rank=len(self.threshold_shape))


-@mark_quantizer(quantization_target=QuantizationTarget.Activation,
-                quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
-                identifier=TrainingMethod.LSQ)
-class LSQActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
-    """
-    Trainable constrained quantizer to quantize layer activations.
-    """
-
-    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
-        """
-        Initialize a LSQActivationQATQuantizer object with parameters to use
-        for the quantization.
-
-        Args:
-            quantization_config: trainable quantizer config class
-        """
-        super().__init__(quantization_config)
-        self.power_of_two = quantization_config.activation_quantization_method == QuantizationMethod.POWER_OF_TWO
-        self.threshold_values = float(quantization_config.activation_quantization_params[C.THRESHOLD])
-        self.threshold_shape = np.asarray(self.threshold_values).shape
-        self.sign = quantization_config.activation_quantization_params[SIGNED]
-        self.num_bits = quantization_config.activation_n_bits
-        n_pos_bits = self.num_bits - int(self.sign)
-        self.min_int = -int(self.sign) * (2 ** n_pos_bits)
-        self.max_int = (2 ** n_pos_bits) - 1
-        if self.power_of_two:
-            self.threshold_values = np.power(2.0, np.ceil(np.log2(np.maximum(self.threshold_values, C.MIN_THRESHOLD))))
-
-
-    def initialize_quantization(self,
-                                tensor_shape: TensorShape,
-                                name: str,
-                                layer: KerasTrainableQuantizationWrapper):
-        """
-        Add quantizer parameters to the quantizer parameters dictionary
-
-        Args:
-            tensor_shape: tensor shape of the quantized tensor.
-            name: Tensor name.
-            layer: Layer to quantize.
-        """
-        ptq_threshold_tensor = layer.add_weight(
-            name + THRESHOLD_TENSOR,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(1.0),
-            trainable=True)
-        ptq_threshold_tensor.assign(self.threshold_values)
-
-        # save the quantizer added parameters for later calculations
-        self.add_quantizer_variable(THRESHOLD_TENSOR, ptq_threshold_tensor, VariableGroup.QPARAMS)
-
-    def __call__(self,
-                 inputs: tf.Tensor,
-                 training: bool):
-        """
-        Quantize a tensor.
-        Args:
-            inputs: Input tensor to quantize.
-            training: Whether the graph is in training mode.
-
-        Returns:
-            The quantized tensor.
-        """
-
-        thresholds = self.get_quantizer_variable(THRESHOLD_TENSOR)
-        n_channels = inputs.shape[-1]
-        scale_factor = 1.0 / np.sqrt(self.max_int * n_channels)
-        q_tensor = symmetric_lsq_quantizer(inputs, thresholds, self.num_bits, self.sign, self.min_int, self.max_int, scale_factor)
-        return q_tensor
-
-    def convert2inferable(self) -> Union[ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer]:
-        """
-        Convert quantizer to inferable quantizer.
-
-        Returns:
-            BaseKerasInferableQuantizer object.
-        """
-
-        if self.power_of_two:
-            thresholds = 2 ** np.ceil(np.log2(self.get_quantizer_variable(THRESHOLD_TENSOR).numpy()))
-            return ActivationPOTInferableQuantizer(num_bits=self.num_bits,
-                                                   # In activation quantization is per-tensor only - thus we pass
-                                                   # the threshold as a list with a len of 1
-                                                   threshold=[thresholds],
-                                                   signed=self.sign)
-        else:
-            thresholds = self.get_quantizer_variable(THRESHOLD_TENSOR).numpy()
-            return ActivationSymmetricInferableQuantizer(num_bits=self.num_bits,
-                                                         # In activation quantization is per-tensor only - thus we
-                                                         # pass the threshold as a list with a len of 1
-                                                         threshold=[thresholds],
-                                                         signed=self.sign)
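The removed LSQ activation quantizer derives its integer grid and its LSQ gradient scale from the activation bit-width, the signedness flag, and the size of the last input dimension. A small worked example of that arithmetic with illustrative values:

import numpy as np

num_bits, sign = 8, True
n_pos_bits = num_bits - int(sign)            # 7
min_int = -int(sign) * (2 ** n_pos_bits)     # -128
max_int = (2 ** n_pos_bits) - 1              # 127

n_channels = 64                              # illustrative last-dimension size
scale_factor = 1.0 / np.sqrt(max_int * n_channels)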
@@ -16,6 +16,8 @@ import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework.tensor_shape import TensorShape
 from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
+from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import \
+    BaseKerasQATWeightTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
@@ -26,47 +28,18 @@ from mct_quantizers.keras.quantizers import \

 from model_compression_toolkit import constants as C

-from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig, \
     TrainableQuantizerActivationConfig
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero
 from model_compression_toolkit.qat.keras.quantizer.quant_utils import ste_round, grad_scale, adjust_range_to_include_zero
-
-
-def uniform_lsq_quantizer(x: tf.Tensor,
-                          min_range: tf.Tensor,
-                          max_range: tf.Tensor,
-                          num_bits: int,
-                          min_int: int,
-                          max_int:int,
-                          scale_factor: float) -> tf.Tensor:
-    """
-    Uniform quantizer according to LSQ algorithm: https://arxiv.org/pdf/1902.08153.pdf
-    Args:
-        x: input to quantize
-        min_range: min range of quantization values
-        max_range: min range of quantization values
-        num_bits: number of bits for quantization
-        min_int: min clipping integer value
-        max_int: max clipping integer value
-        scale_factor: grad scale of LSQ algorithm
-    Returns:
-        A quantized tensor
-    """
-    min_range, max_range = adjust_range_to_include_zero(min_range, max_range, num_bits)
-    delta = (max_range - min_range) / (2 ** num_bits - 1)
-    delta_scaled = grad_scale(delta, scale_factor)
-    rounded = ste_round((x-min_range) / delta_scaled)
-    clipped = tf.math.minimum(tf.math.maximum(rounded, min_int), max_int)
-    quantized = delta_scaled * clipped + min_range
-    return quantized
+from model_compression_toolkit.trainable_infrastructure.keras.quantizer_utils import uniform_lsq_quantizer


 @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.UNIFORM],
                 identifier=TrainingMethod.LSQ)
-class LSQUniformWeightQATQuantizer(
+class LSQUniformWeightQATQuantizer(BaseKerasQATWeightTrainableQuantizer):
     """
     Trainable constrained quantizer to quantize layer's weights.
     """
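As with the symmetric variant, the uniform_lsq_quantizer helper removed here is now imported from model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py. A minimal forward-pass sketch of the same affine arithmetic; plain tf.round stands in for ste_round, and the gradient scaling and adjust_range_to_include_zero steps are deliberately omitted, so this is illustrative only:

import tensorflow as tf


def uniform_lsq_forward_sketch(x, min_range, max_range, num_bits, min_int, max_int):
    # Step size over [min_range, max_range], then round, clip and map back to floats.
    delta = (max_range - min_range) / (2 ** num_bits - 1)
    rounded = tf.round((x - min_range) / delta)
    clipped = tf.clip_by_value(rounded, min_int, max_int)
    return delta * clipped + min_range


# Example: an unsigned 8-bit grid over [-1, 1].
x = tf.constant([-1.5, -0.4, 0.0, 0.7, 1.2])
q = uniform_lsq_forward_sketch(x, -1.0, 1.0, num_bits=8, min_int=0, max_int=255)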
@@ -158,93 +131,3 @@ class LSQUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
                                          channel_axis=self.channel_axis,
                                          input_rank=len(self.min_max_shape))

-
-@mark_quantizer(quantization_target=QuantizationTarget.Activation,
-                quantization_method=[QuantizationMethod.UNIFORM],
-                identifier=TrainingMethod.LSQ)
-class LSQUniformActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
-    """
-    Trainable constrained quantizer to quantize layer activations.
-    """
-
-    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
-        """
-        Initialize a LSQUniformActivationQATQuantizer object with parameters to use
-        for the quantization.
-
-        Args:
-            quantization_config: trainable quantizer config class
-        """
-        super().__init__(quantization_config)
-
-        self.num_bits = quantization_config.activation_n_bits
-        self.min_range = np.array(quantization_config.activation_quantization_params[C.RANGE_MIN])
-        self.max_range = np.array(quantization_config.activation_quantization_params[C.RANGE_MAX])
-        self.min_int = 0
-        self.max_int = 2**self.num_bits - 1
-
-    def initialize_quantization(self,
-                                tensor_shape: TensorShape,
-                                name: str,
-                                layer: KerasTrainableQuantizationWrapper):
-        """
-        Add quantizer parameters to the quantizer parameters dictionary
-
-        Args:
-            tensor_shape: tensor shape of the quantized tensor.
-            name: Tensor name.
-            layer: Layer to quantize.
-        """
-        fq_min = layer.add_weight(
-            name + FQ_MIN,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(-1.0),
-            trainable=True)
-        fq_min.assign(self.min_range)
-
-        fq_max = layer.add_weight(
-            name + FQ_MAX,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(1.0),
-            trainable=True)
-        fq_max.assign(self.max_range)
-
-        # save the quantizer added parameters for later calculations
-        self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
-        self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
-
-    def __call__(self,
-                 inputs: tf.Tensor,
-                 training: bool):
-        """
-        Quantize a tensor.
-        Args:
-            inputs: Input tensor to quantize.
-            training: Whether the graph is in training mode.
-
-        Returns:
-            The quantized tensor.
-        """
-
-        min_range = self.get_quantizer_variable(FQ_MIN)
-        max_range = self.get_quantizer_variable(FQ_MAX)
-        n_channels = inputs.shape[-1]
-        scale_factor = 1.0 / np.sqrt(self.max_int * n_channels)
-        q_tensor = uniform_lsq_quantizer(inputs, min_range, max_range, self.num_bits, self.min_int, self.max_int, scale_factor)
-        return q_tensor
-
-    def convert2inferable(self) -> BaseKerasInferableQuantizer:
-        """
-        Convert quantizer to inferable quantizer.
-
-        Returns:
-            BaseKerasInferableQuantizer object.
-        """
-        min_range, max_range = fix_range_to_include_zero(self.get_quantizer_variable(FQ_MIN).numpy(),
-                                                         self.get_quantizer_variable(FQ_MAX).numpy(),
-                                                         self.num_bits)
-        return ActivationUniformInferableQuantizer(num_bits=self.num_bits,
-                                                   # In activation quantization is per-tensor only - thus we pass
-                                                   # the min/max as lists with a len of 1
-                                                   min_range=[min_range],
-                                                   max_range=[max_range])
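Before the removed class converts to an inferable quantizer, it passes the learned min/max range through fix_range_to_include_zero so that zero is exactly representable on the uniform grid. A generic sketch of one standard way such an adjustment can work (illustrative only, not necessarily MCT's exact implementation):

import numpy as np


def snap_zero_into_range(min_range: float, max_range: float, num_bits: int):
    # Assumes min_range <= 0 <= max_range.
    # Shift the range (keeping its width) so that 0.0 lands exactly on the grid.
    scale = (max_range - min_range) / (2 ** num_bits - 1)
    zero_point = np.round(-min_range / scale)
    adjusted_min = -zero_point * scale
    adjusted_max = (2 ** num_bits - 1 - zero_point) * scale
    return adjusted_min, adjusted_max


# Example: a learned range of [-0.73, 1.52] with 8 bits.
print(snap_zero_into_range(-0.73, 1.52, 8))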
@@ -15,17 +15,18 @@
 from typing import Tuple, Dict, List, Callable

 from model_compression_toolkit.core import common
-from model_compression_toolkit.core.common.framework_info import FrameworkInfo
-from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.qat.common.qat_config import QATConfig
-from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_quantizer import BaseKerasQATTrainableQuantizer
 from mct_quantizers import QuantizationTarget, KerasActivationQuantizationHolder
+from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import \
+    BaseKerasQATWeightTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.get_quantizer_config import \
     get_trainable_quantizer_weights_config, get_trainable_quantizer_activation_config, \
     get_trainable_quantizer_quantization_candidates
 from model_compression_toolkit.trainable_infrastructure.common.get_quantizers import \
     get_trainable_quantizer_class
+from model_compression_toolkit.trainable_infrastructure.keras.activation_quantizers import \
+    BaseKerasActivationTrainableQuantizer


 def get_activation_quantizer_holder(n: common.BaseNode,
@@ -55,7 +56,7 @@ def get_activation_quantizer_holder(n: common.BaseNode,
 def quantization_builder(n: common.BaseNode,
                          qat_config: QATConfig,
                          kernel_attr: str = None,
-                         ) -> Tuple[Dict[str,
+                         ) -> Tuple[Dict[str, BaseKerasQATWeightTrainableQuantizer], List[BaseKerasActivationTrainableQuantizer]]:
     """
     Build quantizers for a node according to its quantization configuration.

@@ -82,7 +83,7 @@ def quantization_builder(n: common.BaseNode,
         quantizer_class = get_trainable_quantizer_class(QuantizationTarget.Weights,
                                                         qat_config.weight_training_method,
                                                         quant_method,
-
+                                                        BaseKerasQATWeightTrainableQuantizer)

         weight_quantizers.update({kernel_attr: quantizer_class(get_trainable_quantizer_weights_config(n,
                                                                                                        attr_name=kernel_attr,
@@ -98,7 +99,7 @@ def quantization_builder(n: common.BaseNode,
         quantizer_class = get_trainable_quantizer_class(QuantizationTarget.Activation,
                                                         qat_config.activation_training_method,
                                                         quant_method,
-
+                                                        BaseKerasActivationTrainableQuantizer)

         activation_quantizers = [quantizer_class(get_trainable_quantizer_activation_config(n, aq_cand),
                                                  **qat_config.activation_quantizer_params_override)] * len(output_shapes)
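With the updated annotations, quantization_builder returns a dict of weight quantizers keyed by attribute name (typed against BaseKerasQATWeightTrainableQuantizer) and a list of activation quantizers typed against the new BaseKerasActivationTrainableQuantizer. A hedged usage sketch; the node, QAT config, and kernel attribute are assumed to be supplied by the surrounding QAT facade:

from typing import Dict, List

from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder
from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import \
    BaseKerasQATWeightTrainableQuantizer
from model_compression_toolkit.trainable_infrastructure.keras.activation_quantizers import \
    BaseKerasActivationTrainableQuantizer


def describe_node_quantizers(node, qat_config, kernel_attr) -> None:
    # node, qat_config and kernel_attr come from the surrounding QAT flow (assumed here).
    weight_quantizers: Dict[str, BaseKerasQATWeightTrainableQuantizer]
    activation_quantizers: List[BaseKerasActivationTrainableQuantizer]
    weight_quantizers, activation_quantizers = quantization_builder(node, qat_config, kernel_attr)

    for attr_name, weight_quantizer in weight_quantizers.items():
        print(f"weight attribute {attr_name}: {type(weight_quantizer).__name__}")
    for activation_quantizer in activation_quantizers:
        print(f"activation quantizer: {type(activation_quantizer).__name__}")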
@@ -18,7 +18,6 @@ from typing import Union
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework.tensor_shape import TensorShape
-from model_compression_toolkit.constants import SIGNED
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX

 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
@@ -29,18 +28,16 @@ from mct_quantizers import QuantizationTarget, mark_quantizer
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C

-from model_compression_toolkit.qat.keras.quantizer.
-from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig
-
-from mct_quantizers.keras.quantizers import WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer, \
-    ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer
+from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import BaseKerasQATWeightTrainableQuantizer
+from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig
+from mct_quantizers.keras.quantizers import WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup


 @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
                 identifier=TrainingMethod.STE)
-class STEWeightQATQuantizer(
+class STEWeightQATQuantizer(BaseKerasQATWeightTrainableQuantizer):
     """
     Trainable constrained quantizer to quantize a layer inputs.
     """
@@ -171,115 +168,3 @@ class STEWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
                                  input_rank=len(self.threshold_shape))


-@mark_quantizer(quantization_target=QuantizationTarget.Activation,
-                quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
-                identifier=TrainingMethod.STE)
-class STEActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
-    """
-    Trainable constrained quantizer to quantize a layer outputs.
-    """
-
-    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
-        """
-        Initialize a STEActivationQATQuantizer object with parameters to use
-        for the quantization.
-
-        Args:
-            quantization_config: trainable quantizer config class
-        """
-        super().__init__(quantization_config)
-        self.power_of_two = quantization_config.activation_quantization_method == QuantizationMethod.POWER_OF_TWO
-        self.threshold_values = quantization_config.activation_quantization_params[C.THRESHOLD]
-        self.threshold_shape = np.asarray(self.threshold_values).shape
-        self.np_threshold_values = float(self.threshold_values)
-        self.signed = quantization_config.activation_quantization_params[SIGNED]
-        if self.power_of_two:
-            self.np_threshold_values = np.power(2.0,
-                                                np.ceil(np.log2(np.maximum(self.np_threshold_values, C.MIN_THRESHOLD))))
-        self.num_bits = quantization_config.activation_n_bits
-        delta = self.np_threshold_values / np.power(2.0, self.num_bits - int(self.signed))
-        min_int = -int(self.signed) * (2 ** (self.num_bits - int(self.signed)))
-        max_int = (2 ** (self.num_bits - int(self.signed))) - 1
-        self.min = delta * min_int
-        self.max = delta * max_int
-
-    def initialize_quantization(self,
-                                tensor_shape: TensorShape,
-                                name: str,
-                                layer: KerasTrainableQuantizationWrapper):
-        """
-        Add quantizer parameters to the quantizer parameters dictionary
-
-        Args:
-            tensor_shape: tensor shape of the quantized tensor.
-            name: Tensor name.
-            layer: Layer to quantize.
-        """
-        ptq_threshold_tensor = layer.add_weight(
-            name + THRESHOLD_TENSOR,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(1.0),
-            trainable=False)
-        ptq_threshold_tensor.assign(self.np_threshold_values)
-
-        fq_min = layer.add_weight(
-            name + FQ_MIN,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(-1.0),
-            trainable=False)
-        fq_min.assign(self.min)
-
-        fq_max = layer.add_weight(
-            name + FQ_MAX,
-            shape=(),
-            initializer=tf.keras.initializers.Constant(1.0),
-            trainable=False)
-        fq_max.assign(self.max)
-
-        # save the quantizer added parameters for later calculations
-        self.add_quantizer_variable(THRESHOLD_TENSOR, ptq_threshold_tensor, VariableGroup.QPARAMS)
-        self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
-        self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
-
-
-    def __call__(self,
-                 inputs: tf.Tensor,
-                 training: bool):
-        """
-        Quantize a tensor.
-        Args:
-            inputs: Input tensor to quantize.
-            training: Whether the graph is in training mode.
-
-        Returns:
-            The quantized tensor.
-        """
-
-        _min = self.get_quantizer_variable(FQ_MIN)
-        _max = self.get_quantizer_variable(FQ_MAX)
-        q_tensor = tf.quantization.fake_quant_with_min_max_vars(inputs, _min, _max,
-                                                                num_bits=self.num_bits)
-
-        return q_tensor
-
-    def convert2inferable(self) -> Union[ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer]:
-        """
-        Convert quantizer to inferable quantizer.
-
-        Returns:
-            BaseKerasInferableQuantizer object.
-        """
-
-        if self.power_of_two:
-            pot_threshold = 2 ** np.ceil(np.log2(self.get_quantizer_variable(THRESHOLD_TENSOR)))
-            return ActivationPOTInferableQuantizer(num_bits=self.num_bits,
-                                                   # In activation quantization is per-tensor only - thus we pass
-                                                   # the threshold as a list with a len of 1
-                                                   threshold=[pot_threshold],
-                                                   signed=self.signed)
-        else:
-            return ActivationSymmetricInferableQuantizer(num_bits=self.num_bits,
-                                                         # In activation quantization is per-tensor only - thus we
-                                                         # pass the threshold as a list with a len of 1
-                                                         threshold=[self.get_quantizer_variable(THRESHOLD_TENSOR).numpy()],
-                                                         signed=self.signed)
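The removed STE activation quantizer maps its symmetric (or power-of-two) threshold onto a fixed fake-quant range and then relies on TensorFlow's built-in fake-quant op in the forward pass. A standalone sketch of that computation with illustrative values, following the arithmetic of the removed __init__ and __call__:

import numpy as np
import tensorflow as tf

# Illustrative setup: an 8-bit signed activation with threshold 4.0.
num_bits, signed, threshold = 8, True, 4.0

# Same range arithmetic as the removed STEActivationQATQuantizer.__init__.
delta = threshold / np.power(2.0, num_bits - int(signed))          # 0.03125
min_int = -int(signed) * (2 ** (num_bits - int(signed)))           # -128
max_int = (2 ** (num_bits - int(signed))) - 1                      # 127
fq_min, fq_max = float(delta * min_int), float(delta * max_int)    # -4.0, 3.96875

# Forward pass of the removed __call__: clamp and fake-quantize to the fixed range.
x = tf.constant([-5.0, -1.3, 0.0, 0.8, 4.2])
q = tf.quantization.fake_quant_with_min_max_vars(x, fq_min, fq_max, num_bits=num_bits)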