mct-nightly 2.4.0.20250925.543.tar.gz → 2.4.2.20250927.534.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
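The file-level summary below can be approximated locally by comparing the member lists of the two sdists. The following is a minimal sketch, assuming both versions are still downloadable from PyPI and that the standard PyPI JSON API (`https://pypi.org/pypi/<package>/<version>/json`) is used to locate the sdist URLs; it only reports added/removed paths, not per-file line counts.

```python
# Sketch: list files added/removed between the two mct-nightly sdists.
# Assumes network access and that both versions remain available on PyPI.
import io
import json
import tarfile
import urllib.request

PACKAGE = "mct-nightly"
OLD, NEW = "2.4.0.20250925.543", "2.4.2.20250927.534"

def sdist_members(version: str) -> set:
    """Download the sdist for `version` and return its member paths,
    with the top-level "mct_nightly-<version>/" directory stripped."""
    meta_url = f"https://pypi.org/pypi/{PACKAGE}/{version}/json"
    with urllib.request.urlopen(meta_url) as resp:
        meta = json.load(resp)
    sdist_url = next(u["url"] for u in meta["urls"] if u["packagetype"] == "sdist")
    with urllib.request.urlopen(sdist_url) as resp:
        data = resp.read()
    with tarfile.open(fileobj=io.BytesIO(data), mode="r:gz") as tar:
        return {name.split("/", 1)[1] for name in tar.getnames() if "/" in name}

old_files, new_files = sdist_members(OLD), sdist_members(NEW)
print("added:",   sorted(new_files - old_files))
print("removed:", sorted(old_files - new_files))
print("unchanged or modified:", len(old_files & new_files))
```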
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/PKG-INFO +6 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/README.md +4 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/PKG-INFO +6 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/SOURCES.txt +19 -13
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/requires.txt +1 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/__init__.py +1 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/analyzer.py +5 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +4 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/base_collector.py +1 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/mean_collector.py +4 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +4 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/framework_implementation.py +22 -10
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/framework_info.py +150 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/fusion/graph_fuser.py +9 -12
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/base_graph.py +72 -45
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/base_node.py +141 -121
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/functional_node.py +2 -19
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +21 -17
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +18 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +9 -14
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +21 -12
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_ru_helper.py +3 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +5 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +6 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_calculator.py +10 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +5 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/sensitivity_eval/metric_calculators.py +9 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/sensitivity_eval/sensitivity_evaluation.py +7 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +5 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/model_collector.py +18 -22
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/model_validation.py +44 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/network_editors/__init__.py +1 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/network_editors/actions.py +130 -14
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/network_editors/edit_network.py +4 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/channels_grouping.py +5 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +6 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +15 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +7 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +4 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/memory_calculator.py +13 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/prune_graph.py +4 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/pruner.py +6 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +13 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/pruning_section.py +18 -9
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/bit_width_config.py +10 -10
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +83 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +14 -20
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +228 -43
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_config.py +1 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +1 -21
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +78 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +5 -8
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +155 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +66 -36
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +70 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantize_node.py +8 -8
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +518 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/apply_activation_bias_correction_to_graph.py +7 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +19 -6
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +19 -11
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/compute_activation_bias_correction_of_graph.py +15 -15
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +20 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +9 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +12 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +6 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +21 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +55 -43
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +3 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +1 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +8 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +12 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/graph_prep_runner.py +35 -22
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +4 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +5 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +15 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +6 -5
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/keras/default_framework_info.py +114 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +7 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +1 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +18 -29
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +16 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +5 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +13 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/keras_implementation.py +37 -17
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/keras/keras_model_validation.py +38 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/keras_node_prior_info.py +13 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +1 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +34 -19
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +2 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/statistics_correction/keras_compute_activation_bias_correction_of_graph.py +5 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +12 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +16 -9
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +5 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +3 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +6 -5
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/pytorch/default_framework_info.py +98 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +4 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +5 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +8 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +4 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +12 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +1 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +41 -24
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +33 -13
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +5 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +2 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/statistics_correction/pytorch_compute_activation_bias_correction_of_graph.py +5 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/quantization_prep_runner.py +11 -6
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/runner.py +15 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +8 -8
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +11 -11
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +1 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +9 -13
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gptq_graph.py +11 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gptq_training.py +8 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/gptq_training.py +9 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/graph_info.py +6 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantization_facade.py +10 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +3 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/gptq_training.py +9 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/graph_info.py +3 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +7 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +3 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/runner.py +7 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/pruning/keras/pruning_facade.py +12 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +8 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/keras/quantization_facade.py +13 -5
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +8 -4
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/runner.py +4 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/common/qat_config.py +6 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantization_facade.py +13 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantization_facade.py +11 -7
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/constants.py +1 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py +3 -3
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +2 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +6 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +4 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/__init__.py +1 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/constants.py +1 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/model_folding_utils.py +6 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/tensorboard_utils.py +4 -1
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/common/xquant_config.py +63 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/xquant/common → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/keras}/core_report_generator.py +2 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +1 -1
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/xquant/common → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/keras}/framework_report_utils.py +23 -2
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/keras_report_utils.py +10 -5
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/keras/similarity_calculator.py +199 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +3 -0
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/core_detect_degrade_layer.py +77 -0
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/core_judge_troubleshoot.py +66 -0
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/core_report_generator.py +177 -0
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/detect_degrade_utils.py +78 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +41 -1
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/framework_report_utils.py +98 -0
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch/judge_troubleshoot_utils.py +562 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +10 -7
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/xquant/common → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/xquant/pytorch}/similarity_calculator.py +6 -1
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +3 -0
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/common/framework_info.py +0 -160
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -144
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -170
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -99
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -199
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/keras/default_framework_info.py +0 -154
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/keras/quantization/activation_quantization_fn_factory.py +0 -47
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -112
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/pytorch/quantization/activation_quantization_fn_factory.py +0 -45
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/quantization_preparation/__init__.py +0 -14
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/quantization_preparation/load_fqc.py +0 -223
- mct_nightly-2.4.0.20250925.543/model_compression_toolkit/xquant/common/xquant_config.py +0 -37
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/LICENSE.md +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/weighted_histogram_collector.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/fusion/fusing_info.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/sensitivity_eval/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/mixed_precision/sensitivity_eval/set_layer_to_bitwidth.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/data_util.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/keras/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/keras/quantizer}/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/keras/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/keras/quantizer}/fake_quant_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/keras/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/keras/quantizer}/lut_fake_quant.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/convtranspose_dynamic_padding.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_linear.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/matmul_decomposition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/pytorch/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/pytorch/quantizer}/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/pytorch/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/pytorch/quantizer}/fake_quant_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543/model_compression_toolkit/core/pytorch/quantization → mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/pytorch/quantizer}/lut_fake_quant.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/defaultdict.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/gradual_activation_quantization.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/common/regularization_factory.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/logger.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/metadata.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/schema_compatability.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/v1.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/schema/v2.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attribute_filter.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/current_tpc.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/common/util.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/verify_packages.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/setup.cfg +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/setup.py +0 -0
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mct-nightly
-Version: 2.4.0.20250925.543
+Version: 2.4.2.20250927.534
 Summary: A Model Compression Toolkit for neural networks
 Author-email: ssi-dnn-dev@sony.com
 Classifier: Programming Language :: Python :: 3
@@ -23,7 +23,7 @@ Requires-Dist: scipy
 Requires-Dist: protobuf
 Requires-Dist: mct-quantizers==1.6.0
 Requires-Dist: pydantic>=2.0
-Requires-Dist: edge-mdt-cl
+Requires-Dist: edge-mdt-cl-dev
 Dynamic: author-email
 Dynamic: classifier
 Dynamic: description
@@ -69,7 +69,7 @@ Pip install the model compression toolkit package in a Python>=3.9 environment w
 ```
 pip install model-compression-toolkit
 ```
-For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/
+For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/SonySemiconductorSolutions/mct-model-optimization/blob/main/INSTALLATION.md).
 
 **Important note**: In order to use MCT, you’ll need to provide a pre-trained floating point model (PyTorch/Keras) as an input.
 
@@ -139,6 +139,9 @@ Modify your model's quantization configuration for specific layers or apply a cu
 **🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sonysemiconductorsolutions.github.io/mct-model-optimization/guidelines/visualization.html).
 
 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
+
+**🔑 XQuant Extension Tool.** Calculates the error for each layer by comparing the float model and quantized model, using both models along with the quantization log. The results are presented in reports. It identifies the causes of the detected errors and recommends appropriate improvement measures for each cause. [Read more](docs/guidelines/XQuant_Extension_Tool.html) [Troubleshoot Manual](docs/docs_troubleshoot/index.html)
+
 __________________________________________________________________________________________________________
 ### Enhanced Post-Training Quantization (EPTQ)
 As part of the GPTQ capability, we provide an advanced optimization algorithm called EPTQ.
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/README.md
RENAMED
@@ -34,7 +34,7 @@ Pip install the model compression toolkit package in a Python>=3.9 environment w
 ```
 pip install model-compression-toolkit
 ```
-For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/
+For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/SonySemiconductorSolutions/mct-model-optimization/blob/main/INSTALLATION.md).
 
 **Important note**: In order to use MCT, you’ll need to provide a pre-trained floating point model (PyTorch/Keras) as an input.
 
@@ -104,6 +104,9 @@ Modify your model's quantization configuration for specific layers or apply a cu
 **🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sonysemiconductorsolutions.github.io/mct-model-optimization/guidelines/visualization.html).
 
 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
+
+**🔑 XQuant Extension Tool.** Calculates the error for each layer by comparing the float model and quantized model, using both models along with the quantization log. The results are presented in reports. It identifies the causes of the detected errors and recommends appropriate improvement measures for each cause. [Read more](docs/guidelines/XQuant_Extension_Tool.html) [Troubleshoot Manual](docs/docs_troubleshoot/index.html)
+
 __________________________________________________________________________________________________________
 ### Enhanced Post-Training Quantization (EPTQ)
 As part of the GPTQ capability, we provide an advanced optimization algorithm called EPTQ.
{mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mct-nightly
-Version: 2.4.0.20250925.543
+Version: 2.4.2.20250927.534
 Summary: A Model Compression Toolkit for neural networks
 Author-email: ssi-dnn-dev@sony.com
 Classifier: Programming Language :: Python :: 3
@@ -23,7 +23,7 @@ Requires-Dist: scipy
 Requires-Dist: protobuf
 Requires-Dist: mct-quantizers==1.6.0
 Requires-Dist: pydantic>=2.0
-Requires-Dist: edge-mdt-cl
+Requires-Dist: edge-mdt-cl-dev
 Dynamic: author-email
 Dynamic: classifier
 Dynamic: description
@@ -69,7 +69,7 @@ Pip install the model compression toolkit package in a Python>=3.9 environment w
 ```
 pip install model-compression-toolkit
 ```
-For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/
+For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/SonySemiconductorSolutions/mct-model-optimization/blob/main/INSTALLATION.md).
 
 **Important note**: In order to use MCT, you’ll need to provide a pre-trained floating point model (PyTorch/Keras) as an input.
 
@@ -139,6 +139,9 @@ Modify your model's quantization configuration for specific layers or apply a cu
 **🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sonysemiconductorsolutions.github.io/mct-model-optimization/guidelines/visualization.html).
 
 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
+
+**🔑 XQuant Extension Tool.** Calculates the error for each layer by comparing the float model and quantized model, using both models along with the quantization log. The results are presented in reports. It identifies the causes of the detected errors and recommends appropriate improvement measures for each cause. [Read more](docs/guidelines/XQuant_Extension_Tool.html) [Troubleshoot Manual](docs/docs_troubleshoot/index.html)
+
 __________________________________________________________________________________________________________
 ### Enhanced Post-Training Quantization (EPTQ)
 As part of the GPTQ capability, we provide an advanced optimization algorithm called EPTQ.
{mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/mct_nightly.egg-info/SOURCES.txt
RENAMED
@@ -25,6 +25,7 @@ model_compression_toolkit/core/common/framework_info.py
 model_compression_toolkit/core/common/memory_computation.py
 model_compression_toolkit/core/common/model_builder_mode.py
 model_compression_toolkit/core/common/model_collector.py
+model_compression_toolkit/core/common/model_validation.py
 model_compression_toolkit/core/common/node_prior_info.py
 model_compression_toolkit/core/common/similarity_analyzer.py
 model_compression_toolkit/core/common/user_info.py
@@ -117,6 +118,7 @@ model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py
 model_compression_toolkit/core/common/quantization/node_quantization_config.py
 model_compression_toolkit/core/common/quantization/quantization_config.py
 model_compression_toolkit/core/common/quantization/quantization_fn_selection.py
+model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py
 model_compression_toolkit/core/common/quantization/quantize_graph_weights.py
 model_compression_toolkit/core/common/quantization/quantize_node.py
 model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
@@ -166,6 +168,7 @@ model_compression_toolkit/core/keras/custom_layer_validation.py
 model_compression_toolkit/core/keras/data_util.py
 model_compression_toolkit/core/keras/default_framework_info.py
 model_compression_toolkit/core/keras/keras_implementation.py
+model_compression_toolkit/core/keras/keras_model_validation.py
 model_compression_toolkit/core/keras/keras_node_prior_info.py
 model_compression_toolkit/core/keras/resource_utilization_data_facade.py
 model_compression_toolkit/core/keras/tf_tensor_numpy.py
@@ -208,10 +211,9 @@ model_compression_toolkit/core/keras/mixed_precision/configurable_activation_qua
 model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py
 model_compression_toolkit/core/keras/pruning/__init__.py
 model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py
-model_compression_toolkit/core/keras/
-model_compression_toolkit/core/keras/
-model_compression_toolkit/core/keras/
-model_compression_toolkit/core/keras/quantization/lut_fake_quant.py
+model_compression_toolkit/core/keras/quantizer/__init__.py
+model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py
+model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py
 model_compression_toolkit/core/keras/reader/__init__.py
 model_compression_toolkit/core/keras/reader/common.py
 model_compression_toolkit/core/keras/reader/connectivity_handler.py
@@ -279,10 +281,9 @@ model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_q
 model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py
 model_compression_toolkit/core/pytorch/pruning/__init__.py
 model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py
-model_compression_toolkit/core/pytorch/
-model_compression_toolkit/core/pytorch/
-model_compression_toolkit/core/pytorch/
-model_compression_toolkit/core/pytorch/quantization/lut_fake_quant.py
+model_compression_toolkit/core/pytorch/quantizer/__init__.py
+model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py
+model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py
 model_compression_toolkit/core/pytorch/reader/__init__.py
 model_compression_toolkit/core/pytorch/reader/graph_builders.py
 model_compression_toolkit/core/pytorch/reader/node_holders.py
@@ -438,8 +439,6 @@ model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py
 model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py
 model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py
 model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py
-model_compression_toolkit/quantization_preparation/__init__.py
-model_compression_toolkit/quantization_preparation/load_fqc.py
 model_compression_toolkit/target_platform_capabilities/__init__.py
 model_compression_toolkit/target_platform_capabilities/constants.py
 model_compression_toolkit/target_platform_capabilities/immutable.py
@@ -515,26 +514,33 @@ model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers
 model_compression_toolkit/xquant/__init__.py
 model_compression_toolkit/xquant/common/__init__.py
 model_compression_toolkit/xquant/common/constants.py
-model_compression_toolkit/xquant/common/core_report_generator.py
 model_compression_toolkit/xquant/common/dataset_utils.py
-model_compression_toolkit/xquant/common/framework_report_utils.py
 model_compression_toolkit/xquant/common/model_analyzer.py
 model_compression_toolkit/xquant/common/model_folding_utils.py
-model_compression_toolkit/xquant/common/similarity_calculator.py
 model_compression_toolkit/xquant/common/similarity_functions.py
 model_compression_toolkit/xquant/common/tensorboard_utils.py
 model_compression_toolkit/xquant/common/xquant_config.py
 model_compression_toolkit/xquant/keras/__init__.py
+model_compression_toolkit/xquant/keras/core_report_generator.py
 model_compression_toolkit/xquant/keras/dataset_utils.py
 model_compression_toolkit/xquant/keras/facade_xquant_report.py
+model_compression_toolkit/xquant/keras/framework_report_utils.py
 model_compression_toolkit/xquant/keras/keras_report_utils.py
 model_compression_toolkit/xquant/keras/model_analyzer.py
+model_compression_toolkit/xquant/keras/similarity_calculator.py
 model_compression_toolkit/xquant/keras/similarity_functions.py
 model_compression_toolkit/xquant/keras/tensorboard_utils.py
 model_compression_toolkit/xquant/pytorch/__init__.py
+model_compression_toolkit/xquant/pytorch/core_detect_degrade_layer.py
+model_compression_toolkit/xquant/pytorch/core_judge_troubleshoot.py
+model_compression_toolkit/xquant/pytorch/core_report_generator.py
 model_compression_toolkit/xquant/pytorch/dataset_utils.py
+model_compression_toolkit/xquant/pytorch/detect_degrade_utils.py
 model_compression_toolkit/xquant/pytorch/facade_xquant_report.py
+model_compression_toolkit/xquant/pytorch/framework_report_utils.py
+model_compression_toolkit/xquant/pytorch/judge_troubleshoot_utils.py
 model_compression_toolkit/xquant/pytorch/model_analyzer.py
 model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
+model_compression_toolkit/xquant/pytorch/similarity_calculator.py
 model_compression_toolkit/xquant/pytorch/similarity_functions.py
 model_compression_toolkit/xquant/pytorch/tensorboard_utils.py
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/__init__.py
RENAMED
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.4.
+__version__ = "2.4.2.20250927.000534"
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/analyzer.py
RENAMED
@@ -32,7 +32,8 @@ def analyzer_model_quantization(representative_data_gen: Callable,
                                 tb_w: TensorboardWriter,
                                 float_graph: Graph,
                                 quantized_graph: Graph,
-                                fw_impl: FrameworkImplementation
+                                fw_impl: FrameworkImplementation,
+                                fw_info: FrameworkInfo):
     """
     Plot the cosine similarity of different points on the graph between the float and quantized
     graphs. Add them to the passed TensorboardWriter object and close all tensorboard writer open
@@ -44,12 +45,14 @@ def analyzer_model_quantization(representative_data_gen: Callable,
         float_graph: Graph of float model.
         quantized_graph: Graph of quantized model.
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
+        fw_info: Information needed for quantization about the specific framework.
 
     """
     if tb_w is not None:
         visual = NNVisualizer(float_graph,
                               quantized_graph,
-                              fw_impl=fw_impl
+                              fw_impl=fw_impl,
+                              fw_info=fw_info)
         if not visual.has_compare_points():
             Logger.error(f'No comparing points were found to plot analyze similarity.')
         else:
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/back2framework/base_model_builder.py
RENAMED
@@ -15,6 +15,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, Tuple
 
+from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.user_info import UserInformation
 
@@ -27,17 +28,20 @@ class BaseModelBuilder(ABC):
     def __init__(self,
                  graph: common.Graph,
                  append2output=None,
+                 fw_info: FrameworkInfo = None,
                  return_float_outputs: bool = False):
         """
 
         Args:
             graph: Graph to build the model from.
             append2output: Nodes of graph to append to model's output.
+            fw_info: Information about the specific framework of the model that is built.
             return_float_outputs: Whether the model returns float tensors or not.
         """
 
         self.graph = graph
         self.append2output = append2output
+        self.fw_info = fw_info
         self.return_float_outputs = return_float_outputs
 
     @abstractmethod
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/base_collector.py
RENAMED
@@ -13,12 +13,11 @@
 # limitations under the License.
 # ==============================================================================
 
-from abc import ABC, abstractmethod
 import numpy as np
 from model_compression_toolkit.logger import Logger
 
 
-class BaseCollector(ABC):
+class BaseCollector(object):
     """
     Base class for statistics collection object.
     """
@@ -27,7 +26,6 @@ class BaseCollector(ABC):
         # When manipulation statistics in a granularity they were not collected by, the data is invalid.
         self.is_legal = True
 
-    @abstractmethod
     def scale(self, scale_factor: np.ndarray):
         """
         Scale all statistics in collector by some factor.
@@ -39,7 +37,6 @@ class BaseCollector(ABC):
         raise NotImplemented(
             f'{self.__class__.__name__} needs to implement scale operation for its state.')  # pragma: no cover
 
-    @abstractmethod
     def shift(self, shift_value: np.ndarray):
         """
         Shift all statistics in collector by some value.
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/mean_collector.py
RENAMED
@@ -87,13 +87,10 @@ class MeanCollector(BaseCollector):
             x: Tensor that goes through the mean collector and needs to be considered in the mean computation.
         """
         self.i += 1  # Update the iteration index
-        if self.axis
-
-
-
-        n = x.shape[axis]
-        transpose_index = [axis, *[i for i in range(len(x.shape)) if i != axis]]
-        mu = np.mean(np.reshape(np.transpose(x, transpose_index), [n, -1]), axis=-1)  # mean per channel for a batch
+        axis = (len(x.shape) - 1) if self.axis == LAST_AXIS else self.axis
+        n = x.shape[axis]
+        transpose_index = [axis, *[i for i in range(len(x.shape)) if i != axis]]
+        mu = np.mean(np.reshape(np.transpose(x, transpose_index), [n, -1]), axis=-1)  # mean per channel for a batch
         self.current_sum += mu  # sum of all batches
         self.current_mean = self.current_sum / self.i  # mean of all batches
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py
RENAMED
@@ -130,13 +130,10 @@ class MinMaxPerChannelCollector(BaseCollector):
             x: Tensor that goes through the collector and needs to be considered in the min/max computation.
         """
 
-        if self.axis
-
-
-
-        n = x.shape[axis]
-        transpose_index = [axis, *[i for i in range(len(x.shape)) if i != axis]]
-        x_reshape = np.reshape(np.transpose(x, transpose_index), [n, -1])
+        axis = (len(x.shape) - 1) if self.axis == LAST_AXIS else self.axis
+        n = x.shape[axis]
+        transpose_index = [axis, *[i for i in range(len(x.shape)) if i != axis]]
+        x_reshape = np.reshape(np.transpose(x, transpose_index), [n, -1])
         if self.state is None:
             x_max = np.max(x_reshape, axis=-1)
             x_min = np.min(x_reshape, axis=-1)
- {mct_nightly-2.4.0.20250925.543 → mct_nightly-2.4.2.20250927.534}/model_compression_toolkit/core/common/framework_implementation.py
RENAMED
@@ -125,16 +125,18 @@ class FrameworkImplementation(ABC):
                           graph: Graph,
                           mode: ModelBuilderMode,
                           append2output: List[Any],
+                          fw_info: FrameworkInfo,
                           return_float_outputs: bool = False) -> Tuple:
         """
         Build a framework model from a graph.
-        The mode determines how the model should be
+        The mode determines how the model should be build. append2output is a list of Nodes
         to set as the model outputs.
 
         Args:
             graph: Graph to build the model from it.
             mode: Mode for how to build the model.
             append2output: List of Nodes to set as the model's outputs.
+            fw_info: FrameworkInfo object with information about the specific framework's model
             return_float_outputs (bool): whether to return outputs before or after quantization nodes (default)
 
         Returns:
@@ -168,13 +170,15 @@ class FrameworkImplementation(ABC):
     @abstractmethod
     def shift_negative_correction(self,
                                   graph: Graph,
-                                  core_config: CoreConfig
+                                  core_config: CoreConfig,
+                                  fw_info: FrameworkInfo) -> Graph:
         """
         Apply shift negative correction (SNC) on a graph.
 
         Args:
             graph: Graph to apply SNC on.
             core_config: Quantization configuration.
+            fw_info: FrameworkInfo object with information about the specific framework's model.
 
         Returns:
             Graph after SNC.
@@ -185,13 +189,15 @@ class FrameworkImplementation(ABC):
     @abstractmethod
     def compute_activation_bias_correction(self,
                                            graph: Graph,
-                                           quant_config: QuantizationConfig
+                                           quant_config: QuantizationConfig,
+                                           fw_info: FrameworkInfo) -> Graph:
         """
         Compute activation bias correction on a graph.
 
         Args:
             graph: Graph to apply activation bias correction on.
             quant_config: QuantizationConfig of how the model should be quantized.
+            fw_info: FrameworkInfo object with information about the specific framework's model.
 
         Returns:
             Graph after activation bias correction computing.
@@ -201,28 +207,30 @@ class FrameworkImplementation(ABC):
 
     @abstractmethod
     def get_substitutions_channel_equalization(self,
-                                               quant_config: QuantizationConfig
+                                               quant_config: QuantizationConfig,
+                                               fw_info: FrameworkInfo) -> List[common.BaseSubstitution]:
         """
         Return a list of the framework substitutions used for channel equalization.
 
         Args:
             quant_config: QuantizationConfig to determine which substitutions to return.
+            fw_info: FrameworkInfo object with information about the specific framework's model.
 
         Returns:
             A list of the framework substitutions used after we collect statistics.
         """
         raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
-
+                                  f'framework\'s get_substitutions_channel_equalization method.')  # pragma: no cover
 
     @abstractmethod
-    def get_substitutions_prepare_graph(self) -> List[common.BaseSubstitution]:
+    def get_substitutions_prepare_graph(self, fw_info: FrameworkInfo = None) -> List[common.BaseSubstitution]:
         """
 
         Returns: A list of the framework substitutions used to prepare the graph.
 
         """
         raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
-
+                                  f'framework\'s get_substitutions_prepare_graph method.')  # pragma: no cover
 
     @abstractmethod
     def get_substitutions_pre_statistics_collection(self, quant_config: QuantizationConfig) -> \
@@ -320,12 +328,14 @@ class FrameworkImplementation(ABC):
                                   f'method.')  # pragma: no cover
 
     def get_node_prior_info(self, node: BaseNode,
+                            fw_info: FrameworkInfo,
                             graph: Graph) -> NodePriorInfo:
         """
         Get a NodePriorInfo object for a node.
 
         Args:
             node: Node to get its prior info.
+            fw_info: Framework specific information needed to create the prior info of the node.
             graph: Graph to check the next node type.
 
         Returns:
@@ -333,7 +343,7 @@ class FrameworkImplementation(ABC):
         """
 
         raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
-
+                                  f'framework\'s get_node_prior_info method.')  # pragma: no cover
 
     def count_node_for_mixed_precision_interest_points(self, node: BaseNode) -> bool:
         """
@@ -384,18 +394,20 @@ class FrameworkImplementation(ABC):
 
     @abstractmethod
     def get_node_mac_operations(self,
-                                node: BaseNode
+                                node: BaseNode,
+                                fw_info: FrameworkInfo) -> float:
         """
         Gets the MAC operation count for a given operation.
 
         Args:
             node: A graph node that wraps the operation for which the MAC count is computed.
+            fw_info: FrameworkInfo object with information about the specific framework's model.
 
         Returns: The MAC count of the operation
         """
 
         raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
-
+                                  f'framework\'s get_node_mac_operations method.')  # pragma: no cover
 
     @abstractmethod
     def apply_second_moment_correction(self,
- mct_nightly-2.4.2.20250927.534/model_compression_toolkit/core/common/framework_info.py
ADDED
@@ -0,0 +1,150 @@
+# Copyright 2021 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+
+from collections.abc import Callable
+from enum import Enum
+from typing import Dict, Any, List
+
+from mct_quantizers import QuantizationMethod
+from model_compression_toolkit.defaultdict import DefaultDict
+
+
+# Default value to use for ops without kernel.
+# This is a weird default, but it's used all over the place, so for now only extract it to const so that it can be
+# referenced by variable instead of hard-coded.
+DEFAULT_KERNEL_ATTRIBUTES = [None]
+
+
+class ChannelAxis(Enum):
+    """
+
+    Index of output channels axis:
+
+    NHWC - Output channels index is last.
+
+    NCHW - Output channels index is 1.
+
+    """
+    NHWC = -1
+    NCHW = 1
+
+
+class FrameworkInfo:
+
+    def __init__(self,
+                 activation_quantizer_mapping: Dict[QuantizationMethod, Callable],
+                 kernel_channels_mapping: DefaultDict,
+                 activation_min_max_mapping: Dict[str, tuple],
+                 layer_min_max_mapping: Dict[Any, tuple],
+                 kernel_ops_attributes_mapping: DefaultDict,
+                 out_channel_axis_mapping: DefaultDict):
+        """
+        A class to wrap all information about a specific framework the library needs to quantize a model.
+        Specifically, FrameworkInfo holds lists of layers by how they should be quantized, and multiple mappings such as
+        layer to it kernel channels indices, and a layer to its min/max values, etc.
+        The layers lists are divided into three groups:
+        kernel_ops: Layers that have coefficients and need to get quantized (e.g., Conv2D, Dense, etc.)
+        activation_ops: Layers that their outputs should get quantized (e.g., Add, ReLU, etc.)
+        no_quantization_ops:Layers that should not get quantized (e.g., Reshape, Transpose, etc.)
+
+        Args:
+            activation_quantizer_mapping (Dict[QuantizationMethod, Callable]): A dictionary mapping from QuantizationMethod to a quantization function.
+            kernel_channels_mapping (DefaultDict): Dictionary from a layer to a tuple of its kernel in/out channels indices.
+            activation_min_max_mapping (Dict[str, tuple]): Dictionary from an activation function to its min/max output values.
+            layer_min_max_mapping (Dict[Any, tuple]): Dictionary from a layer to its min/max output values.
+            kernel_ops_attributes_mapping (DefaultDict): Dictionary from a framework operator to a list of its weights attirbutes to quantize.
+            out_channel_axis_mapping (DefaultDict): Dictionary of output channels of the model's layers (for computing statistics per-channel).
+
+        Examples:
+            When quantizing a Keras model, if we want to quantize the kernels of Conv2D layers only, we can
+            set, and we know it's kernel out/in channel indices are (3, 2) respectivly:
+
+            >>> import tensorflow as tf
+            >>> kernel_ops = [tf.keras.layers.Conv2D]
+            >>> kernel_channels_mapping = DefaultDict({tf.keras.layers.Conv2D: (3,2)})
+
+            Then, we can create a FrameworkInfo object:
+
+            >>> FrameworkInfo(kernel_channels_mapping, {}, {})
+
+            If an activation layer (tf.keras.layers.Activation) should be quantized and we know it's min/max outputs range in advanced, we can add it to activation_min_max_mapping for saving the statistics collection time. For example:
+
+            >>> activation_min_max_mapping = {'softmax': (0, 1)}
+            >>> FrameworkInfo(kernel_channels_mapping, activation_min_max_mapping, {})
+
+            If a layer's activations should be quantized and we know it's min/max outputs range in advanced, we can add it to layer_min_max_mapping for saving the statistics collection time. For example:
+
+            >>> layer_min_max_mapping = {tf.keras.layers.Softmax: (0, 1)}
+            >>> FrameworkInfo(kernel_channels_mapping, activation_min_max_mapping, layer_min_max_mapping)
+
+        """
+
+        self.activation_quantizer_mapping = activation_quantizer_mapping
+        self.kernel_channels_mapping = kernel_channels_mapping
+        self.activation_min_max_mapping = activation_min_max_mapping
+        self.layer_min_max_mapping = layer_min_max_mapping
+        self.kernel_ops_attributes_mapping = kernel_ops_attributes_mapping
+        self.out_channel_axis_mapping = out_channel_axis_mapping
+
+    def get_kernel_op_attributes(self, node_type: Any) -> List[str]:
+        """
+        Get a list of attributes of a layer's weights to quantize.
+
+        Args:
+            node_type: Layer to get its attributes.
+
+        Returns:
+            A list of attributes the layer has and should be quantized.
+        """
+        attr_list = self.kernel_ops_attributes_mapping.get(node_type)
+        return attr_list
+
+    def is_kernel_op(self, node_type: Any) -> bool:
+        """
+        Check is the node is a kernel operation.
+
+        Args:
+            node_type: Layer to get its attributes.
+
+        Returns:
+            True if node type is a kernel operation, else False.
+        """
+        return node_type in self.kernel_ops_attributes_mapping.keys()
+
+    def layers_has_min_max(self, layer: Any) -> bool:
+        """
+        Check if a layer is in a layer to min/max mapping the FrameworkInfo holds.
+        Args:
+            layer: A layer to check if has a min/max known values.
+
+        Returns:
+            Whether a layer has a min/max known values or not.
+        """
+
+        return layer in self.layer_min_max_mapping
+
+    def activation_has_min_max(self, activation_name: str) -> bool:
+        """
+        Check if an activation layer has a min/max mapping.
+
+        Args:
+            activation_name: String of the activation function to check for its min/max values.
+
+        Returns:
+            Whether an activation layer has a min/max known values or not.
+        """
+
+        return activation_name in self.activation_min_max_mapping