mct-nightly 2.2.0.20240915.556.tar.gz → 2.2.0.20240917.426.tar.gz
This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/SOURCES.txt +4 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +67 -1
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +6 -0
- mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +16 -0
- mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +258 -0
- mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +133 -0
- mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +113 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/README.md +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/setup.py +0 -0
{mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/mct_nightly.egg-info/SOURCES.txt
RENAMED
@@ -479,6 +479,10 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py
+model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py
 model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py
{mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/__init__.py
RENAMED
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.
+__version__ = "2.2.0.20240917.000426"
{mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
RENAMED
@@ -79,6 +79,72 @@ def set_quantization_configuration_to_graph(graph: Graph,
     return graph
 
 
+def filter_node_qco_by_graph(node: BaseNode,
+                             tpc: TargetPlatformCapabilities,
+                             graph: Graph,
+                             node_qc_options: QuantizationConfigOptions
+                             ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
+    """
+    Filter quantization config options that don't match the graph.
+    A node may have several quantization config options with 'activation_n_bits' values, and
+    the next nodes in the graph may support different bit-width as input activation. This function
+    filters out quantization config that don't comply to these attributes.
+
+    Args:
+        node: Node for filtering.
+        tpc: TPC to extract the QuantizationConfigOptions for the next nodes.
+        graph: Graph object.
+        node_qc_options: Node's QuantizationConfigOptions.
+
+    Returns:
+        A base config (OpQuantizationConfig) and a config options list (list of OpQuantizationConfig)
+        that are compatible with next nodes supported input bit-widths.
+
+    """
+    # Filter quantization config options that don't match the graph.
+    _base_config = node_qc_options.base_config
+    _node_qc_options = node_qc_options.quantization_config_list
+
+    # Build next_nodes list by appending to the node's next nodes list all nodes that are quantization preserving.
+    _next_nodes = graph.get_next_nodes(node)
+    next_nodes = []
+    while len(_next_nodes):
+        n = _next_nodes.pop(0)
+        qco = n.get_qco(tpc)
+        qp = [qc.quantization_preserving for qc in qco.quantization_config_list]
+        if not all(qp) and any(qp):
+            Logger.error(f'Attribute "quantization_preserving" should be the same for all QuantizaionConfigOptions in {n}.')
+        if qp[0]:
+            _next_nodes.extend(graph.get_next_nodes(n))
+        next_nodes.append(n)
+
+    if len(next_nodes):
+        next_nodes_qc_options = [_node.get_qco(tpc) for _node in next_nodes]
+        next_nodes_supported_input_bitwidth = min([op_cfg.max_input_activation_n_bits
+                                                   for qc_opts in next_nodes_qc_options
+                                                   for op_cfg in qc_opts.quantization_config_list])
+
+        # Filter node's QC options that match next nodes input bit-width.
+        _node_qc_options = [_option for _option in _node_qc_options
+                            if _option.activation_n_bits <= next_nodes_supported_input_bitwidth]
+        if len(_node_qc_options) == 0:
+            Logger.critical(f"Graph doesn't match TPC bit configurations: {node} -> {next_nodes}.")
+
+        # Verify base config match
+        if any([node_qc_options.base_config.activation_n_bits > qc_opt.base_config.max_input_activation_n_bits
+                for qc_opt in next_nodes_qc_options]):
+            # base_config activation bits doesn't match next node supported input bit-width -> replace with
+            # a qco from quantization_config_list with maximum activation bit-width.
+            if len(_node_qc_options) > 0:
+                output_act_bitwidth = {qco.activation_n_bits: i for i, qco in enumerate(_node_qc_options)}
+                _base_config = _node_qc_options[output_act_bitwidth[max(output_act_bitwidth)]]
+                Logger.warning(f"Node {node} base quantization config changed to match Graph and TPC configuration.\nCause: {node} -> {next_nodes}.")
+            else:
+                Logger.critical(f"Graph doesn't match TPC bit configurations: {node} -> {next_nodes}.")  # pragma: no cover
+
+    return _base_config, _node_qc_options
+
+
 def set_quantization_configs_to_node(node: BaseNode,
                                      graph: Graph,
                                      quant_config: QuantizationConfig,
@@ -99,7 +165,7 @@ def set_quantization_configs_to_node(node: BaseNode,
         manual_bit_width_override (Optional[int]): Specifies a custom bit-width to override the node's activation bit-width. Defaults to None.
     """
     node_qc_options = node.get_qco(tpc)
-    base_config, node_qc_options_list =
+    base_config, node_qc_options_list = filter_node_qco_by_graph(node, tpc, graph, node_qc_options)
 
     # If a manual_bit_width_override is given, filter node_qc_options_list to retain only the options with activation bits equal to manual_bit_width_override,
     # and update base_config accordingly.
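The core rule the new filter_node_qco_by_graph function enforces: a node keeps only the activation bit-width options that every relevant successor accepts as input. A minimal standalone sketch of that rule, using hypothetical helper names rather than MCT's API:

from typing import List

def filter_activation_options(node_option_n_bits: List[int],
                              successor_max_input_n_bits: List[int]) -> List[int]:
    # Candidate bit-widths are capped by the smallest maximum input bit-width
    # among the successors (the real implementation above additionally walks
    # through quantization-preserving successors).
    if not successor_max_input_n_bits:
        return node_option_n_bits
    supported = min(successor_max_input_n_bits)
    return [n_bits for n_bits in node_option_n_bits if n_bits <= supported]

# Example: a node offering 16- and 8-bit outputs, feeding successors that accept
# at most 16 and 8 bits respectively, is left with only the 8-bit option.
assert filter_activation_options([16, 8], [16, 8]) == [8]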
{mct-nightly-2.2.0.20240915.556 → mct-nightly-2.2.0.20240917.426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py
RENAMED
@@ -42,6 +42,8 @@ def get_tpc_dict_by_fw(fw_name):
             get_keras_tpc as get_keras_tpc_v3
         from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_keras import \
             get_keras_tpc as get_keras_tpc_v3_lut
+        from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_keras import \
+            get_keras_tpc as get_keras_tpc_v4
 
         # Keras: TPC versioning
         tpc_models_dict = {'v1': get_keras_tpc_v1,
@@ -51,6 +53,7 @@ def get_tpc_dict_by_fw(fw_name):
                            'v2_lut': get_keras_tpc_v2_lut,
                            'v3': get_keras_tpc_v3,
                            'v3_lut': get_keras_tpc_v3_lut,
+                           'v4': get_keras_tpc_v4,
                            LATEST: get_keras_tpc_latest}
     elif fw_name == PYTORCH:
         ###############################
@@ -73,6 +76,8 @@ def get_tpc_dict_by_fw(fw_name):
             get_pytorch_tpc as get_pytorch_tpc_v3
         from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_pytorch import \
             get_pytorch_tpc as get_pytorch_tpc_v3_lut
+        from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_pytorch import \
+            get_pytorch_tpc as get_pytorch_tpc_v4
 
         # Pytorch: TPC versioning
         tpc_models_dict = {'v1': get_pytorch_tpc_v1,
@@ -82,6 +87,7 @@ def get_tpc_dict_by_fw(fw_name):
                            'v2_lut': get_pytorch_tpc_v2_lut,
                            'v3': get_pytorch_tpc_v3,
                            'v3_lut': get_pytorch_tpc_v3_lut,
+                           'v4': get_pytorch_tpc_v4,
                            LATEST: get_pytorch_tpc_latest}
     if tpc_models_dict is not None:
        return tpc_models_dict
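With the 'v4' entries registered in both versioning dicts above, the new TPC can be requested through MCT's public lookup helper. A minimal sketch, assuming the standard helper signature and the 'imx500' platform name:

import model_compression_toolkit as mct

# Keras flavor of the new IMX500 TPC version
keras_tpc_v4 = mct.get_target_platform_capabilities('tensorflow', 'imx500',
                                                    target_platform_version='v4')
# PyTorch flavor of the new IMX500 TPC version
pytorch_tpc_v4 = mct.get_target_platform_capabilities('pytorch', 'imx500',
                                                      target_platform_version='v4')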
mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py
ADDED
@@ -0,0 +1,16 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+__version__ = 'v4'
mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py
ADDED
@@ -0,0 +1,258 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from typing import List, Tuple
+
+import model_compression_toolkit as mct
+from model_compression_toolkit.constants import FLOAT_BITWIDTH
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS
+from model_compression_toolkit.target_platform_capabilities.target_platform import OpQuantizationConfig, \
+    TargetPlatformModel, Signedness
+from model_compression_toolkit.target_platform_capabilities.target_platform.op_quantization_config import \
+    AttributeQuantizationConfig
+
+tp = mct.target_platform
+
+
+def get_tp_model() -> TargetPlatformModel:
+    """
+    A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
+    bits configuration list for mixed-precision quantization.
+    NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
+    (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
+    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    This version enables metadata by default.
+
+    Returns: A TargetPlatformModel object.
+
+    """
+    base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
+    return generate_tp_model(default_config=default_config,
+                             base_config=base_config,
+                             mixed_precision_cfg_list=mixed_precision_cfg_list,
+                             name='imx500_tp_model')
+
+
+def get_op_quantization_configs() -> \
+        Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
+    """
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
+    default configuration for mixed-precision quantization.
+
+    Returns: An OpQuantizationConfig config object and a list of OpQuantizationConfig objects.
+
+    """
+
+    # TODO: currently, we don't want to quantize any attribute but the kernel by default,
+    #  to preserve the current behavior of MCT, so quantization is disabled for all other attributes.
+    #  Other quantization parameters are set to what we eventually want to quantize by default
+    #  when we enable multi-attributes quantization - THIS NEED TO BE MODIFIED IN ALL TP MODELS!
+
+    # define a default quantization config for all non-specified weights attributes.
+    default_weight_attr_config = AttributeQuantizationConfig(
+        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_n_bits=8,
+        weights_per_channel_threshold=False,
+        enable_weights_quantization=False,  # TODO: this will changed to True once implementing multi-attributes quantization
+        lut_values_bitwidth=None)
+
+    # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
+    kernel_base_config = AttributeQuantizationConfig(
+        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_n_bits=8,
+        weights_per_channel_threshold=True,
+        enable_weights_quantization=True,
+        lut_values_bitwidth=None)
+
+    # define a quantization config to quantize the bias (for layers where there is a bias attribute).
+    bias_config = AttributeQuantizationConfig(
+        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_n_bits=FLOAT_BITWIDTH,
+        weights_per_channel_threshold=False,
+        enable_weights_quantization=False,
+        lut_values_bitwidth=None)
+
+    # Create a quantization config.
+    # A quantization configuration defines how an operator
+    # should be quantized on the modeled hardware:
+
+    # We define a default config for operation without kernel attribute.
+    # This is the default config that should be used for non-linear operations.
+    eight_bits_default = tp.OpQuantizationConfig(
+        default_weight_attr_config=default_weight_attr_config,
+        attr_weights_configs_mapping={},
+        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_n_bits=8,
+        supported_input_activation_n_bits=8,
+        enable_activation_quantization=True,
+        quantization_preserving=False,
+        fixed_scale=None,
+        fixed_zero_point=None,
+        simd_size=32,
+        signedness=Signedness.AUTO)
+
+    # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
+    linear_eight_bits = tp.OpQuantizationConfig(
+        default_weight_attr_config=default_weight_attr_config,
+        attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
+        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_n_bits=8,
+        supported_input_activation_n_bits=8,
+        enable_activation_quantization=True,
+        quantization_preserving=False,
+        fixed_scale=None,
+        fixed_zero_point=None,
+        simd_size=32,
+        signedness=Signedness.AUTO)
+
+    # To quantize a model using mixed-precision, create
+    # a list with more than one OpQuantizationConfig.
+    # In this example, we quantize some operations' weights
+    # using 2, 4 or 8 bits, and when using 2 or 4 bits, it's possible
+    # to quantize the operations' activations using LUT.
+    four_bits = linear_eight_bits.clone_and_edit(attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4}},
+                                                 simd_size=linear_eight_bits.simd_size * 2)
+    two_bits = linear_eight_bits.clone_and_edit(attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2}},
+                                                simd_size=linear_eight_bits.simd_size * 4)
+
+    mixed_precision_cfg_list = [linear_eight_bits, four_bits, two_bits]
+
+    return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
+
+
+def generate_tp_model(default_config: OpQuantizationConfig,
+                      base_config: OpQuantizationConfig,
+                      mixed_precision_cfg_list: List[OpQuantizationConfig],
+                      name: str) -> TargetPlatformModel:
+    """
+    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    mixed-precision configurations options list.
+
+    Args
+        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+            quantization configuration options.
+        name: The name of the TargetPlatformModel.
+
+    Returns: A TargetPlatformModel object.
+
+    """
+    # Create a QuantizationConfigOptions, which defines a set
+    # of possible configurations to consider when quantizing a set of operations (in mixed-precision, for example).
+    # If the QuantizationConfigOptions contains only one configuration,
+    # this configuration will be used for the operation quantization:
+    default_configuration_options = tp.QuantizationConfigOptions([default_config])
+
+    # Create a QuantizationConfigOptions for quantizing constants in functional ops.
+    # Constant configuration is similar to the default eight bit configuration except for PoT
+    # quantization method for the constant.
+    # Since the constants are not named attributes of the layer, we use the default_weight_attr_config to
+    # define the desired quantization properties for them.
+    const_config = default_config.clone_and_edit(
+        default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
+            enable_weights_quantization=True, weights_per_channel_threshold=True,
+            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+    const_configuration_options = tp.QuantizationConfigOptions([const_config])
+
+    # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
+    # support 16 bit as input and output.
+    const_config_input16 = const_config.clone_and_edit(
+        supported_input_activation_n_bits=(8, 16))
+    const_config_input16_output16 = const_config_input16.clone_and_edit(
+        activation_n_bits=16, signedness=Signedness.SIGNED)
+    const_configuration_options_inout16 = tp.QuantizationConfigOptions([const_config_input16_output16,
+                                                                        const_config_input16],
+                                                                       base_config=const_config_input16)
+
+    const_config_input16_per_tensor = const_config.clone_and_edit(
+        supported_input_activation_n_bits=(8, 16),
+        default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
+            enable_weights_quantization=True, weights_per_channel_threshold=True,
+            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO)
+    )
+    const_config_input16_output16_per_tensor = const_config_input16_per_tensor.clone_and_edit(
+        activation_n_bits=16, signedness=Signedness.SIGNED)
+    const_configuration_options_inout16_per_tensor = tp.QuantizationConfigOptions([const_config_input16_output16_per_tensor,
+                                                                                   const_config_input16_per_tensor],
+                                                                                  base_config=const_config_input16_per_tensor)
+
+    # Create a TargetPlatformModel and set its default quantization config.
+    # This default configuration will be used for all operations
+    # unless specified otherwise (see OperatorsSet, for example):
+    generated_tpm = tp.TargetPlatformModel(default_configuration_options, add_metadata=True, name=name)
+
+    # To start defining the model's components (such as operator sets, and fusing patterns),
+    # use 'with' the TargetPlatformModel instance, and create them as below:
+    with generated_tpm:
+        # Create an OperatorsSet to represent a set of operations.
+        # Each OperatorsSet has a unique label.
+        # If a quantization configuration options is passed, these options will
+        # be used for operations that will be attached to this set's label.
+        # Otherwise, it will be a configure-less set (used in fusing):
+
+        generated_tpm.set_simd_padding(is_simd_padding=True)
+
+        # May suit for operations like: Dropout, Reshape, etc.
+        default_qco = tp.get_default_quantization_config_options()
+        tp.OperatorsSet("NoQuantization",
+                        default_qco.clone_and_edit(enable_activation_quantization=False)
+                        .clone_and_edit_weight_attribute(enable_weights_quantization=False))
+        tp.OperatorsSet("QuantizationPreserving",
+                        default_qco.clone_and_edit(enable_activation_quantization=False,
+                                                   quantization_preserving=True)
+                        .clone_and_edit_weight_attribute(enable_weights_quantization=False))
+        tp.OperatorsSet("DimensionManipulationOps",
+                        default_qco.clone_and_edit(enable_activation_quantization=False,
+                                                   quantization_preserving=True,
+                                                   supported_input_activation_n_bits=(8, 16))
+                        .clone_and_edit_weight_attribute(enable_weights_quantization=False))
+        tp.OperatorsSet("MergeOps", const_configuration_options_inout16_per_tensor)
+
+        # Create Mixed-Precision quantization configuration options from the given list of OpQuantizationConfig objects
+        mixed_precision_configuration_options = tp.QuantizationConfigOptions(mixed_precision_cfg_list,
+                                                                             base_config=base_config)
+
+        # Define operator sets that use mixed_precision_configuration_options:
+        conv = tp.OperatorsSet("Conv", mixed_precision_configuration_options)
+        fc = tp.OperatorsSet("FullyConnected", mixed_precision_configuration_options)
+
+        # Define operations sets without quantization configuration
+        # options (useful for creating fusing patterns, for example):
+        any_relu = tp.OperatorsSet("AnyReLU")
+        add = tp.OperatorsSet("Add", const_configuration_options_inout16)
+        sub = tp.OperatorsSet("Sub", const_configuration_options_inout16)
+        mul = tp.OperatorsSet("Mul", const_configuration_options_inout16)
+        div = tp.OperatorsSet("Div", const_configuration_options)
+        prelu = tp.OperatorsSet("PReLU")
+        swish = tp.OperatorsSet("Swish")
+        sigmoid = tp.OperatorsSet("Sigmoid")
+        tanh = tp.OperatorsSet("Tanh")
+
+        # Combine multiple operators into a single operator to avoid quantization between
+        # them. To do this we define fusing patterns using the OperatorsSets that were created.
+        # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
+        activations_after_conv_to_fuse = tp.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
+        activations_after_fc_to_fuse = tp.OperatorSetConcat(any_relu, swish, sigmoid)
+        any_binary = tp.OperatorSetConcat(add, sub, mul, div)
+
+        # ------------------- #
+        # Fusions
+        # ------------------- #
+        tp.Fusing([conv, activations_after_conv_to_fuse])
+        tp.Fusing([fc, activations_after_fc_to_fuse])
+        tp.Fusing([any_binary, any_relu])
+
+    return generated_tpm
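The v4 TP model above defines an 8-bit base configuration plus 8/4/2-bit kernel options for mixed precision. A short inspection sketch (hypothetical usage, relying only on names visible in the diff):

from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tp_model import \
    get_op_quantization_configs

base_cfg, mixed_precision_cfgs, default_cfg = get_op_quantization_configs()
for cfg in mixed_precision_cfgs:
    # Each config maps weight attributes (kernel, bias) to their quantization settings.
    for attr_name, attr_cfg in cfg.attr_weights_configs_mapping.items():
        print(f"{attr_name}: {attr_cfg.weights_n_bits}-bit weights, "
              f"{cfg.activation_n_bits}-bit activations")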
mct-nightly-2.2.0.20240917.426/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py
ADDED
@@ -0,0 +1,133 @@
+# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+import tensorflow as tf
+from packaging import version
+
+from model_compression_toolkit.defaultdict import DefaultDict
+from model_compression_toolkit.verify_packages import FOUND_SONY_CUSTOM_LAYERS
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_DEPTHWISE_KERNEL, \
+    KERAS_KERNEL, BIAS_ATTR, BIAS
+
+if FOUND_SONY_CUSTOM_LAYERS:
+    from sony_custom_layers.keras.object_detection.ssd_post_process import SSDPostProcess
+
+if version.parse(tf.__version__) >= version.parse("2.13"):
+    from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
+        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
+        Conv2DTranspose, Identity, Concatenate
+else:
+    from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
+        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
+        Conv2DTranspose, Identity, Concatenate
+
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tp_model import get_tp_model
+import model_compression_toolkit as mct
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4 import __version__ as TPC_VERSION
+
+tp = mct.target_platform
+
+
+def get_keras_tpc() -> tp.TargetPlatformCapabilities:
+    """
+    get a Keras TargetPlatformCapabilities object with default operation sets to layers mapping.
+
+    Returns: a Keras TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+    imx500_tpc_tp_model = get_tp_model()
+    return generate_keras_tpc(name='imx500_tpc_keras_tpc', tp_model=imx500_tpc_tp_model)
+
+
+def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
+    """
+    Generates a TargetPlatformCapabilities object with default operation sets to layers mapping.
+
+    Args:
+        name: Name of the TargetPlatformCapabilities.
+        tp_model: TargetPlatformModel object.
+
+    Returns: a TargetPlatformCapabilities object for the given TargetPlatformModel.
+    """
+
+    keras_tpc = tp.TargetPlatformCapabilities(tp_model, name=name, version=TPC_VERSION)
+
+    no_quant_list = [tf.quantization.fake_quant_with_min_max_vars,
+                     tf.math.argmax,
+                     tf.shape,
+                     tf.math.equal,
+                     tf.nn.top_k,
+                     tf.image.combined_non_max_suppression,
+                     tf.compat.v1.shape]
+    quantization_preserving = [Cropping2D,
+                               ZeroPadding2D,
+                               Dropout,
+                               MaxPooling2D,
+                               tf.split,
+                               tf.gather,
+                               tf.cast,
+                               tf.unstack,
+                               tf.compat.v1.gather,
+                               tf.__operators__.getitem,
+                               tf.strided_slice]
+    quantization_preserving_list_16bit_input = [Reshape,
+                                                tf.reshape,
+                                                Permute,
+                                                tf.transpose,
+                                                Flatten]
+
+    if FOUND_SONY_CUSTOM_LAYERS:
+        no_quant_list.append(SSDPostProcess)
+
+    with keras_tpc:
+        tp.OperationsSetToLayers("NoQuantization", no_quant_list)
+        tp.OperationsSetToLayers("QuantizationPreserving", quantization_preserving)
+        tp.OperationsSetToLayers("DimensionManipulationOps", quantization_preserving_list_16bit_input)
+        tp.OperationsSetToLayers("MergeOps", [tf.stack, tf.concat, Concatenate])
+        tp.OperationsSetToLayers("Conv",
+                                 [Conv2D,
+                                  DepthwiseConv2D,
+                                  Conv2DTranspose,
+                                  tf.nn.conv2d,
+                                  tf.nn.depthwise_conv2d,
+                                  tf.nn.conv2d_transpose],
+                                 # we provide attributes mapping that maps each layer type in the operations set
+                                 # that has weights attributes with provided quantization config (in the tp model) to
+                                 # its framework-specific attribute name.
+                                 # note that a DefaultDict should be provided if not all the layer types in the
+                                 # operation set are provided separately in the mapping.
+                                 attr_mapping={
+                                     KERNEL_ATTR: DefaultDict({
+                                         DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
+                                         tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
+                                     BIAS_ATTR: DefaultDict(default_value=BIAS)})
+        tp.OperationsSetToLayers("FullyConnected", [Dense],
+                                 attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
+                                               BIAS_ATTR: DefaultDict(default_value=BIAS)})
+        tp.OperationsSetToLayers("AnyReLU", [tf.nn.relu,
+                                             tf.nn.relu6,
+                                             tf.nn.leaky_relu,
+                                             ReLU,
+                                             LeakyReLU,
+                                             tp.LayerFilterParams(Activation, activation="relu"),
+                                             tp.LayerFilterParams(Activation, activation="leaky_relu")])
+        tp.OperationsSetToLayers("Add", [tf.add, Add])
+        tp.OperationsSetToLayers("Sub", [tf.subtract, Subtract])
+        tp.OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
+        tp.OperationsSetToLayers("Div", [tf.math.divide, tf.math.truediv])
+        tp.OperationsSetToLayers("PReLU", [PReLU])
+        tp.OperationsSetToLayers("Swish", [tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")])
+        tp.OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
+        tp.OperationsSetToLayers("Tanh", [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
+
+    return keras_tpc
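A minimal end-to-end sketch of using the new v4 Keras TPC for post-training quantization (assuming MCT's keras_post_training_quantization facade; the toy model and data generator below are placeholders, not part of the diff):

import numpy as np
import tensorflow as tf
import model_compression_toolkit as mct
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_keras import get_keras_tpc

# Placeholder float model and representative dataset for illustration only.
model = tf.keras.Sequential([tf.keras.layers.Conv2D(4, 3, input_shape=(32, 32, 3)),
                             tf.keras.layers.ReLU()])

def representative_data_gen():
    for _ in range(2):
        yield [np.random.randn(1, 32, 32, 3).astype(np.float32)]

keras_tpc = get_keras_tpc()
quantized_model, quantization_info = mct.ptq.keras_post_training_quantization(
    model,
    representative_data_gen,
    target_platform_capabilities=keras_tpc)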