mct-nightly 2.1.0.20240623.439.tar.gz → 2.1.0.20240624.520.tar.gz
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
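As a minimal sketch of how a file-level comparison like the listing below could be reproduced, the snippet fetches both sdists and diffs their member lists. It assumes both versions are still downloadable from PyPI under the `mct-nightly` project and that `pip` is available; the version strings are taken from the title above, and everything else (directory names, helper functions) is illustrative only.

```python
# Sketch: reproduce a file-level comparison of the two mct-nightly sdists.
# Assumes both versions can still be fetched from PyPI with `pip download`.
import subprocess
import tarfile
from pathlib import Path

OLD = "2.1.0.20240623.439"
NEW = "2.1.0.20240624.520"

def fetch_sdist(version: str, dest: Path) -> Path:
    """Download the source tarball for one version into dest and return its path."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        ["pip", "download", f"mct-nightly=={version}",
         "--no-deps", "--no-binary", ":all:", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("*.tar.gz"))

def member_names(sdist: Path) -> set[str]:
    """List archive members with the top-level versioned directory stripped."""
    with tarfile.open(sdist) as tar:
        return {name.split("/", 1)[1] for name in tar.getnames() if "/" in name}

old_files = member_names(fetch_sdist(OLD, Path("old")))
new_files = member_names(fetch_sdist(NEW, Path("new")))

print("removed:", sorted(old_files - new_files))
print("added:  ", sorted(new_files - old_files))
```

Per-file `+N -N` counts such as those shown below would additionally require diffing the extracted file contents (e.g. with `difflib.unified_diff`); the sketch only reports files added to or removed from the archive.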
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/PKG-INFO +1 -1
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/mct_nightly.egg-info/PKG-INFO +1 -1
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/functional_node.py +3 -4
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +10 -12
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +11 -4
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +21 -15
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +19 -17
- mct-nightly-2.1.0.20240624.520/model_compression_toolkit/core/pytorch/reader/graph_builders.py +353 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/runner.py +1 -1
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +1 -1
- mct-nightly-2.1.0.20240623.439/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -276
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/LICENSE.md +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/README.md +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/mct_nightly.egg-info/SOURCES.txt +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/setup.cfg +0 -0
- {mct-nightly-2.1.0.20240623.439 → mct-nightly-2.1.0.20240624.520}/setup.py +0 -0
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.1.0.20240623.000439"
+__version__ = "2.1.0.20240624.000520"
@@ -1,4 +1,4 @@
-from typing import Dict, Any, Tuple, Type
+from typing import Dict, Any, Tuple, Type, List, Union
 
 from model_compression_toolkit.constants import FOUND_TF
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
@@ -25,7 +25,7 @@ class FunctionalNode(BaseNode):
                  functional_op: Any = None,
                  inputs_as_list: bool = False,
                  has_activation: bool = True,
-                 tensor_input_allocs = None):
+                 tensor_input_allocs: List[Union[int, str]] = None):
         """
         Init a FunctionalNode object.
 
@@ -44,8 +44,7 @@ class FunctionalNode(BaseNode):
             functional_op: The op the node implements.
             inputs_as_list: Whether to pass the node its input tensors as a list or not when calling the layer.
             has_activation: Whether the node has activations that we might want to quantize.
-            tensor_input_allocs: A list of indices for
-
+            tensor_input_allocs: A list of indices and strings for allocations input tensors in the node's args and kwargs.
         """
 
         super().__init__(name,
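
Illustrative sketch (not part of the released package): based on _build_input_alloc_and_call_args in the added graph_builders.py further below, tensor_input_allocs records positional tensor inputs by argument index and keyword tensor inputs by keyword name, so the PyTorch model builder can re-insert the runtime input tensors into the node's args and kwargs. The call and values below are assumptions for illustration only.

# Hedged example: for a traced call such as
#   torch.nn.functional.batch_norm(x, mean, var, weight=w, bias=b)
# the tensor inputs x, mean, var map to their positional indices and w, b to their keyword names:
#   tensor_input_allocs = [0, 1, 2, 'weight', 'bias']
# The builder later places each entry of its input-tensors list back at that index or keyword.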
@@ -106,7 +106,7 @@ def _run_operation(n: BaseNode,
                    input_tensors: List,
                    op_func: Any,
                    quantize_node_activation_fn,
-                   use_activation_quantization: bool) -> Tuple[
+                   use_activation_quantization: bool) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
     """
     Applying the layer (op_func) to the input tensors (input_tensors).
     If quantized is set to True, and the layer's corresponding node (n) has quantization
@@ -126,17 +126,17 @@ def _run_operation(n: BaseNode,
     op_call_args = n.op_call_args if isinstance(n, FunctionalNode) else []
     functional_kwargs = n.op_call_kwargs if isinstance(n, FunctionalNode) else {}
 
-
-
-
+    # Insert positional weights only when not a quantized functional node, because quantized functional nodes
+    # insert the quantized weights in the wrapper.
+    if isinstance(n, FunctionalNode) and isinstance(op_func, PytorchQuantizationWrapper):
+        _tensor_input_allocs = [i for i in n.tensor_input_allocs if i not in n.weights]
+    else:
         input_tensors = n.insert_positional_weights_to_input_list(input_tensors)
         # convert inputs from positional weights (numpy arrays) to tensors. Must handle each element in the
         # list separately, because in FX the tensors are FX objects and fail to_torch_tensor
         input_tensors = [to_torch_tensor(t, numpy_type=t.dtype) if isinstance(t, np.ndarray) else t
                          for t in input_tensors]
         _tensor_input_allocs = None
-    else:
-        _tensor_input_allocs = [i for i in n.tensor_input_allocs if i not in n.weights]
 
     if isinstance(n, FunctionalNode) and n.inputs_as_list:
         out_tensors_of_n_float = op_func(input_tensors, *op_call_args, **functional_kwargs)
@@ -152,6 +152,8 @@ def _run_operation(n: BaseNode,
             out_tensors_of_n_float = torch.cat(out_tensors_of_n_float, dim=0)
         out_tensors_of_n = quantize_node_activation_fn(out_tensors_of_n_float)
 
+    if not isinstance(out_tensors_of_n, list):
+        out_tensors_of_n, out_tensors_of_n_float = [out_tensors_of_n], [out_tensors_of_n_float]
     return out_tensors_of_n, out_tensors_of_n_float
 
 
@@ -318,12 +320,8 @@ class PytorchModel(torch.nn.Module):
                 quantize_node_activation_fn=activation_quantization_fn,
                 use_activation_quantization=use_activation_quantization)
 
-
-
-                node_to_output_tensors_dict_float.update({node: out_tensors_of_n_float})
-            else:
-                node_to_output_tensors_dict.update({node: [out_tensors_of_n]})
-                node_to_output_tensors_dict_float.update({node: [out_tensors_of_n_float]})
+            node_to_output_tensors_dict.update({node: out_tensors_of_n})
+            node_to_output_tensors_dict_float.update({node: out_tensors_of_n_float})
 
         if self.append2output:
             outputs = _generate_outputs(self.append2output,
@@ -19,6 +19,7 @@ from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
+from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
 from model_compression_toolkit.core.pytorch.constants import IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE, KERNEL, BIAS
 from model_compression_toolkit.core.common import FrameworkInfo
 
@@ -37,7 +38,7 @@ class FunctionalConvSubstitution(common.BaseSubstitution):
 
     def substitute(self,
                    graph: Graph,
-                   func_node:
+                   func_node: FunctionalNode) -> Graph:
         """
         Substitute functional and conv/linear layer with torch layer
         Args:
@@ -60,9 +61,15 @@ class FunctionalConvSubstitution(common.BaseSubstitution):
         # Create new node of layer convolution
         if 1 not in func_node.weights:
             Logger.critical(f'Weight input missing for node {func_node.name}.')  # pragma: no cover
-
-
-
+        # Extract index of kernel and bias according to tensor_input_allocs if they were input as kwargs. If
+        # they were input as args, use their fixed positions.
+        weight_index = func_node.tensor_input_allocs.index(KERNEL) if KERNEL in func_node.tensor_input_allocs else 1
+        bias_index = func_node.tensor_input_allocs.index(BIAS) if BIAS in func_node.tensor_input_allocs else 2
+        if weight_index not in func_node.weights:
+            Logger.critical(f'Mismatch between tensor_input_allocs and weight index in node {func_node.name}.')  # pragma: no cover
+        weight = func_node.weights[weight_index]
+        bias = func_node.weights.get(bias_index)
+        framework_attr = func_node.op_call_kwargs
         framework_attr.update({OUT_CHANNELS: weight.shape[out_channel_index]})
         framework_attr.update({IN_CHANNELS: weight.shape[in_channel_index]})
         framework_attr.update({KERNEL_SIZE: weight.shape[2:]})
@@ -20,6 +20,7 @@ import torch.nn.functional as F
 from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common import BaseNode, Graph
+from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
 from model_compression_toolkit.core.pytorch.constants import *
 from model_compression_toolkit.logger import Logger
 
@@ -37,9 +38,12 @@ class FunctionalBatchNorm(common.BaseSubstitution):
         super().__init__(matcher_instance=bn_node)
 
     @staticmethod
-    def get_attributes_from_weights(node:
+    def get_attributes_from_weights(node: FunctionalNode) -> Dict:
         """
-
+        Convert functional batch_norm positional weights to BatchNorm2d weights. Extract indices of gamma
+        and beta according to tensor_input_allocs if they were input as kwargs. If they were input as args,
+        use their fixed positions.
+
         Args:
             node: functional batch_norm node.
 
|
|
53
57
|
GAMMA: np.ones(node.weights[1].shape),
|
54
58
|
BETA: np.zeros(node.weights[1].shape)}
|
55
59
|
|
56
|
-
|
57
|
-
|
60
|
+
# Check if weight and/or bias were not given.
|
61
|
+
if KERNEL in node.tensor_input_allocs:
|
62
|
+
weights_dict[GAMMA] = node.weights[node.tensor_input_allocs.index(KERNEL)]
|
63
|
+
elif KERNEL not in node.op_call_kwargs:
|
64
|
+
weights_dict[GAMMA] = node.weights[3]
|
58
65
|
|
59
|
-
if
|
60
|
-
|
61
|
-
|
62
|
-
else:
|
63
|
-
weights_dict[BETA] = node.weights[3]
|
64
|
-
if 4 in node.weights:
|
65
|
-
assert has_bias
|
66
|
+
if BIAS in node.tensor_input_allocs:
|
67
|
+
weights_dict[BETA] = node.weights[node.tensor_input_allocs.index(BIAS)]
|
68
|
+
elif BIAS not in node.op_call_kwargs:
|
66
69
|
weights_dict[BETA] = node.weights[4]
|
67
70
|
|
68
71
|
return weights_dict
|
69
72
|
|
70
73
|
def substitute(self,
|
71
74
|
graph: Graph,
|
72
|
-
node:
|
75
|
+
node: FunctionalNode) -> Graph:
|
73
76
|
"""
|
74
77
|
Substitute functional.batch_norm and its inputs with BatchNorm2d.
|
75
78
|
Args:
|
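
Illustrative sketch (not part of the released package): the fallback indices 3 and 4 above follow the positional signature torch.nn.functional.batch_norm(input, running_mean, running_var, weight, bias), where weight is gamma and bias is beta. A minimal, hedged example of a call whose traced node would populate those positions:

import torch.nn.functional as F

def batch_norm_sketch(x, running_mean, running_var, gamma, beta):
    # Passed positionally, gamma and beta sit at input positions 3 and 4,
    # which is what node.weights[3] / node.weights[4] pick up in the substitution above.
    return F.batch_norm(x, running_mean, running_var, gamma, beta)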
@@ -87,10 +90,13 @@ class FunctionalBatchNorm(common.BaseSubstitution):
         bn_node_weights = self.get_attributes_from_weights(node)
         if not bn_node_weights:
             return graph
+        framework_attr = {NUM_FEATURES: out_channels}
+        if EPSILON in node.op_call_kwargs:
+            framework_attr.update({EPSILON: node.op_call_kwargs[EPSILON]})
+        if MOMENTUM in node.op_call_kwargs:
+            framework_attr.update({MOMENTUM: node.op_call_kwargs[MOMENTUM]})
         new_batchnorm2d = BaseNode(name=node.name + '_into_BatchNorm2d',
-                                   framework_attr=
-                                                   EPSILON: EPSILON_VAL,
-                                                   MOMENTUM: MOMENTUM_VAL},
+                                   framework_attr=framework_attr,
                                    input_shape=node.output_shape,
                                    output_shape=node.output_shape,
                                    weights=bn_node_weights,
@@ -21,6 +21,7 @@ from typing import Dict, Tuple, List
 from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common import BaseNode, Graph
+from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
 from model_compression_toolkit.core.pytorch.constants import *
 from model_compression_toolkit.logger import Logger
 
@@ -38,9 +39,11 @@ class FunctionalLayerNorm(common.BaseSubstitution):
         super().__init__(matcher_instance=ln_node)
 
     @staticmethod
-    def get_attributes_from_weights(node:
+    def get_attributes_from_weights(node: FunctionalNode, normalized_shape: [Tuple, List, int]) -> Dict:
         """
-
+        Convert functional layer_norm positional weights to LayerNorm weights. Extract indices of gamma
+        and beta according to tensor_input_allocs if they were input as kwargs. If they were input as args,
+        use their fixed positions.
         Args:
             node: Node that match the pattern in the substitution init.
             normalized_shape: nn.LayerNorm "normalized_shape" argument
@@ -50,28 +53,26 @@ class FunctionalLayerNorm(common.BaseSubstitution):
         """
 
         # Define default weight and bias
-        weights_dict = {GAMMA: np.ones(normalized_shape),
-                        BETA: np.zeros(normalized_shape)
+        weights_dict = {GAMMA: np.ones(normalized_shape),  # Default value in case weight is not given
+                        BETA: np.zeros(normalized_shape)  # Default value in case bias is not given
                         }
 
         # Check if weight and/or bias were not given.
-
-
+        if KERNEL in node.tensor_input_allocs:
+            weights_dict[GAMMA] = node.weights[node.tensor_input_allocs.index(KERNEL)]
+        elif KERNEL not in node.op_call_kwargs:
+            weights_dict[GAMMA] = node.weights[1]
 
-        if
-
-
-        else:
-            weights_dict[BETA] = node.weights[1]
-        if 2 in node.weights:
-            assert has_bias
+        if BIAS in node.tensor_input_allocs:
+            weights_dict[BETA] = node.weights[node.tensor_input_allocs.index(BIAS)]
+        elif BIAS not in node.op_call_kwargs:
             weights_dict[BETA] = node.weights[2]
 
         return weights_dict
 
     def substitute(self,
                    graph: Graph,
-                   node:
+                   node: FunctionalNode) -> Graph:
         """
         Substitute functional.layer_norm and its inputs with LayerNorm.
         Args:
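
Illustrative sketch (not part of the released package, and an inference from the added graph_builders.py, which keys node.weights by the node's tensor inputs rather than by raw argument position): in torch.nn.functional.layer_norm(input, normalized_shape, weight, bias, eps), normalized_shape is a plain list of ints, not a tensor input, so a positionally supplied weight (gamma) is the second tensor input and bias (beta) the third, matching the fallback indices 1 and 2 above. A minimal, hedged example:

import torch.nn.functional as F

def layer_norm_sketch(x, gamma, beta):
    # normalized_shape is not a tensor, so gamma and beta become tensor inputs 1 and 2,
    # which is what node.weights[1] / node.weights[2] pick up in the substitution above.
    return F.layer_norm(x, gamma.shape, gamma, beta)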
@@ -85,10 +86,11 @@ class FunctionalLayerNorm(common.BaseSubstitution):
 
         ln_node_weights = self.get_attributes_from_weights(node, normalized_shape)
 
+        framework_attr = {NORMALIZED_SHAPE: normalized_shape}
+        if EPSILON in node.op_call_kwargs:
+            framework_attr.update({EPSILON: node.op_call_kwargs[EPSILON]})
         new_layernorm = BaseNode(name=node.name + '_into_LayerNorm',
-                                 framework_attr=
-                                                 EPSILON: node.framework_attr.get('eps'),
-                                                 },
+                                 framework_attr=framework_attr,
                                  input_shape=node.output_shape,
                                  output_shape=node.output_shape,
                                  weights=ln_node_weights,
mct-nightly-2.1.0.20240624.520/model_compression_toolkit/core/pytorch/reader/graph_builders.py
ADDED
@@ -0,0 +1,353 @@
+# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+import inspect
+from operator import getitem
+from typing import Dict, List, Tuple, Callable, Union, Any, Type
+
+import numpy as np
+import torch
+from torch.fx import GraphModule, Node
+
+from model_compression_toolkit.core.common import BaseNode
+from model_compression_toolkit.core.common.graph.base_graph import OutTensor
+from model_compression_toolkit.core.common.graph.edge import Edge
+from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
+from model_compression_toolkit.core.pytorch.constants import OUTPUT, PLACEHOLDER, TENSOR_META, CALL_FUNCTION, TYPE, \
+    CALL_METHOD, BIAS, FUNCTIONAL_OP, OP_CALL_KWARGS, OP_CALL_ARGS, INPUTS_AS_LIST, TENSOR_INPUT_ALLOCS, GET_ATTR
+from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder
+from model_compression_toolkit.logger import Logger
+
+
+def _extract_parameters_and_buffers(module: Union[torch.nn.Module, GraphModule],
+                                    to_numpy: Callable) -> Dict[str, np.ndarray]:
+    """
+    Extract parameters & buffers from input module to a dictionary.
+    Args:
+        module: FX ot PyTorch module to extract parameters and buffers from.
+
+    Returns:
+        Dictionary containing module parameters and buffers by name.
+    """
+
+    named_parameters = {name: to_numpy(parameter) for name, parameter in module.named_parameters()}
+    named_buffers = {name: to_numpy(buffer) for name, buffer in module.named_buffers()}
+
+    return {**named_parameters, **named_buffers}
+
+
+def is_instance_first_arg(n: Node, expected_type: Union[Type, Tuple[Type]]) -> bool:
+    """
+    Check whether first argument of the node is the expected type
+    Args:
+        n: fx node.
+        expected_type: Expected 1st argument type.
+
+    Returns:
+        True is the first argument of node n is of the expected type, else return False.
+
+    """
+    return len(n.args) > 0 and isinstance(n.args[0], expected_type)
+
+
+def _build_input_alloc_and_call_args(n: Node, input_tensors_in_node_kwargs: Dict,
+                                     inputs_as_list: bool) -> Tuple[List, List]:
+    """
+    Build the tensor inputs list and op_call_args of the functional node.
+
+    Args:
+        n: fx node.
+        input_tensors_in_node_kwargs: A dictionary of node kwarg name and input fx node.
+        inputs_as_list: Is node's inputs are a list.
+
+    Returns:
+        A list of updated op_call args.
+        A list of tensor allocations in node's inputs.
+
+    """
+
+    tensor_input_alloc = []
+    op_call_args = list(n.args)
+    if inputs_as_list:
+        op_call_args.pop(0)
+    else:
+        for in_node in n.all_input_nodes:
+            # The extra for loop is used to tackle the case of the same input tensor for this node (e.g. torch.add(x, x)).
+            for i, arg in enumerate(n.args):
+                if arg == in_node:
+                    tensor_input_alloc.append(i)
+            for k, arg in input_tensors_in_node_kwargs.items():
+                if arg == in_node:
+                    tensor_input_alloc.append(k)
+
+    return op_call_args, tensor_input_alloc
+
+
+def _extract_torch_layer_data(node_module: torch.nn.Module,
+                              to_numpy: Callable) -> Tuple[Any, Dict[str, np.ndarray], Dict]:
+    """
+    Extract required data from a non-functional node to rebuild the PyTorch layer.
+
+    Args:
+        node_module: Torch layer, such as nn.Conv2d, nn.Linear, etc.
+        to_numpy: Function to convert framework's tensor to a Numpy array.
+
+    Returns:
+        Node layer class.
+        A mapping between the layer's named parameters and buffers to their tensor values.
+        A framework_attr dictionary required to instantiate the node with the layer class.
+    """
+    node_type = type(node_module)
+    if not isinstance(node_module, torch.nn.Module):
+        Logger.error(f"Expected an instance of torch.nn.Module for node {node_module.name}, but got {node_type}")
+    # Extract the instance framework_attr (i.e. the arguments the class instance was initialized with). "fullargspec"
+    # is a list of the layer's attribute names, that will be used as keys of the framework_attr dictionary. We the
+    # values from the layer instance.
+    fullargspec = inspect.getfullargspec(node_type.__init__).args
+    framework_attr = {k: v for k, v in node_module.__dict__.items() if k in fullargspec}
+    # The "bias" argument doesn't appear in the node_module.__dict__, so we add it manually.
+    if hasattr(node_module, BIAS) and BIAS in fullargspec:
+        framework_attr[BIAS] = False if node_module.bias is None else True
+
+    # Extract layer weights and named buffers.
+    weights = {n: w for n, w in _extract_parameters_and_buffers(node_module, to_numpy).items() if len(w.shape) > 0}
+    return node_type, weights, framework_attr
+
+
+def _extract_input_and_output_shapes(_node: Node) -> Tuple[List, List]:
+    """
+    Extract input and output shapes of a node.
+    Args:
+        _node: fx node.
+
+    Returns:
+        Input and output shapes as lists.
+    """
+    input_shape = []
+    if _node.op != PLACEHOLDER:
+        for i, input_node in enumerate(_node.all_input_nodes):
+            tensor_meta = input_node.meta
+            if tensor_meta[TYPE] in [torch.Tensor, torch.nn.parameter.Parameter]:
+                input_shape += [list(tensor_meta[TENSOR_META].shape)]
+            elif tensor_meta[TYPE] == tuple:
+                input_shape += [list(n.shape) for n in tensor_meta[TENSOR_META]]
+            elif tensor_meta[TYPE] == int:
+                input_shape += [[1]]
+
+    if _node.meta[TYPE] == torch.Tensor:
+        output_shape = [list(_node.meta[TENSOR_META].shape)]
+    elif _node.meta[TYPE] in (list, tuple):
+        output_shape = [list(m.shape) for m in _node.meta[TENSOR_META]]
+    elif _node.meta[TYPE] == int:
+        output_shape = [[1]]
+    else:
+        output_shape = []
+
+    return input_shape, output_shape
+
+
+def nodes_builder(model: GraphModule,
+                  module_dict: Dict,
+                  to_numpy: Callable) -> Tuple[List, List, List, Dict]:
+    """
+    Build a node from a fx node. A node contains all information to reconstruct the model module or call function
+    it's representing in the model: operation, module configuration, weights, input/output shape.
+    Args:
+        model: Pytorch FX model.
+        module_dict: A dictionary of the Pyotrch model's named modules.
+        to_numpy: A function to convert a Tensor to numpy array
+
+    Returns:
+        A list of Graph nodes that were built from the fx GraphModule nodes.
+    """
+    # Init function variables:
+    inputs, outputs = [], []
+    nodes, output_nodes = [], []
+    fx_node_2_graph_node = {}
+    consts_dict = {}
+    used_consts = set()
+
+    # Init parameters & buffers dictionary of the entire model. We later extract the constants values from this dictionary.
+    model_parameters_and_buffers = _extract_parameters_and_buffers(model, to_numpy)
+
+    for node in model.graph.nodes:
+
+        # ##############################################
+        # Extract node type and framework attributes  #
+        # ##############################################
+        weights = {}
+        framework_attr = {}
+        node_has_activation = True
+
+        if node.target in module_dict.keys():
+            # PyTorch module node, such as nn.Conv2d or nn.Linear.
+            node_type, weights, framework_attr = _extract_torch_layer_data(module_dict[node.target], to_numpy)
+
+        elif node.op == CALL_FUNCTION:
+            # Node is a function that handle a parameter\buffer in the model.
+            node_type = node.target
+            if node_type in [getattr, getitem]:
+                node_has_activation = False
+
+        elif node.op == PLACEHOLDER:
+            # Input node to the model.
+            node_type = DummyPlaceHolder
+
+        elif node.op == OUTPUT:
+            # Output node of the model. Only saved in output_nodes for later handling.
+            output_nodes += node.all_input_nodes
+            continue
+
+        elif node.op == CALL_METHOD:
+            # Node is a PyTorch function such as torch.add, torch.reshape etc.
+            if hasattr(torch, node.target):
+                node_type = getattr(torch, node.target)
+            elif hasattr(torch.Tensor, node.target):
+                node_type = getattr(torch.Tensor, node.target)
+            else:
+                Logger.critical(f"The call method '{node.target}' in {node} is not supported.")
+
+        elif node.op == GET_ATTR:
+            # Node holding a constant -> add to consts_dict so can add them later to weights of next node.
+            if node.target in consts_dict:
+                Logger.critical('A constant weight appears to have been recorded multiple times.')
+            consts_dict[node] = model_parameters_and_buffers[node.target]
+            continue
+        else:
+            Logger.critical(f'Encountered an unsupported node type in node: {node.name}.')
+
+        # Add constants to weights dictionary.
+        if node.op != PLACEHOLDER:
+            for i, input_node in enumerate(node.all_input_nodes):
+                if input_node in consts_dict:
+                    used_consts.add(input_node)
+                    weights.update({i: consts_dict[input_node]})
+
+        # Extract input and output shapes of the node.
+        input_shape, output_shape = _extract_input_and_output_shapes(node)
+
+        # Initiate graph nodes.
+        if node.op in [CALL_METHOD, CALL_FUNCTION]:
+            graph_node_type = FunctionalNode
+
+            # Filter FX nodes from node_kwargs. These FX nodes are tensor inputs to the node that are part of the
+            # model's graph. We remove them because the node_kwargs should not include input tensors of the node.
+            # These input tensors will be inserted in the kwargs according to the tensor_input_alloc which is used
+            # to convert the input_tensors list in the builder to the node's args & kwargs.
+            node_kwargs, input_tensors_in_node_kwargs = {}, {}
+            for k, v in node.kwargs.items():
+                if isinstance(v, Node):
+                    input_tensors_in_node_kwargs[k] = v
+                else:
+                    node_kwargs[k] = v
+
+            # Check if node's first input argument is a list of input fx nodes, such as torch.cat:
+            is_first_input_list_of_nodes = is_instance_first_arg(node, (list, tuple)) and all(
+                [isinstance(n, Node) for n in node.args[0]])
+            is_placeholder_a_list = is_instance_first_arg(node, Node) and \
+                node.args[0].op == PLACEHOLDER and node.args[0].meta[TYPE] in (list, tuple)
+            inputs_as_list = is_first_input_list_of_nodes or is_placeholder_a_list
+
+            # Build tensor_input_alloc required for the model builder. All input nodes are received as a list in the builder,
+            # so tensor_input_alloc is used to allocate each input tensor in the correct place in the node's args & kwargs.
+            op_call_args, tensor_input_alloc = _build_input_alloc_and_call_args(node, input_tensors_in_node_kwargs,
+                                                                                inputs_as_list)
+
+            # Remove torch.fx.node.Node from inputs to the functional node. FX nodes are input tensors in the builder,
+            # so they are remove from the op_call_args (same as op_call_kwargs) and are inserted back according to the
+            # tensor_input_alloc list.
+            op_call_args = [arg for arg in op_call_args if not isinstance(arg, Node)]
+            # Convert torch.fx.immutable_collections.immutable_list to tuple.
+            op_call_args = [tuple(arg) if isinstance(arg, torch.fx.immutable_collections.immutable_list) else arg
+                            for arg in op_call_args]
+
+            kwargs = {FUNCTIONAL_OP: node_type,
+                      OP_CALL_ARGS: op_call_args,
+                      OP_CALL_KWARGS: node_kwargs,
+                      INPUTS_AS_LIST: inputs_as_list,
+                      TENSOR_INPUT_ALLOCS: tensor_input_alloc}
+        else:
+            if not all([not isinstance(v, Node) for v in framework_attr.values()]):
+                Logger.critical(f'Found FX nodes in framework attributes of {node.name}. This node type should not contain any.')  # pragma: no cover
+
+            graph_node_type = BaseNode
+            kwargs = {}
+
+        graph_node = graph_node_type(name=node.name,
+                                     framework_attr=framework_attr,
+                                     input_shape=input_shape,
+                                     output_shape=output_shape,
+                                     weights=weights,
+                                     layer_class=node_type,
+                                     has_activation=node_has_activation,
+                                     **kwargs)
+
+        # Generate graph inputs list.
+        if node.op == PLACEHOLDER:
+            for ii in range(len(output_shape)):
+                inputs.append(graph_node)
+
+        fx_node_2_graph_node[node] = graph_node
+        nodes.append(graph_node)
+
+    # Check whether all extracted constants were used in the graph.
+    not_connected_consts = [c for c in consts_dict if c not in used_consts]
+    if not_connected_consts:
+        Logger.critical(f'Error reading graph: These constants are not connected in the graph: {not_connected_consts}.')  # pragma: no cover
+
+    # Generate graph outputs list.
+    for node in output_nodes:
+        outputs.append(OutTensor(fx_node_2_graph_node[node], output_nodes.index(node)))
+
+    return nodes, inputs, outputs, fx_node_2_graph_node
+
+
+def edges_builder(model: GraphModule,
+                  fx_node_2_graph_node: Dict) -> List:
+    """
+
+    Args:
+        model: Pytorch FX model.
+        fx_node_2_graph_node: dictionary from fx node to graph node.
+
+    Returns:
+        List of graph edges
+    """
+    src_index = 0  # in fx src_index is always zero because fx uses the getitem operator to fetch node outputs
+    edges = []
+    connectivity_dict = {}
+    for node in model.graph.nodes:
+        if node.op != OUTPUT:
+            for input_node in node.all_input_nodes:
+                if input_node in fx_node_2_graph_node:
+                    # n_edges_for_input_node is for the case that the input node appears more than
+                    # once as the input of the node, for example add(x, x)
+                    n_edges_for_input_node = sum([1 for a in node.args if input_node == a])
+                    n_edges_for_input_node = max(n_edges_for_input_node, 1)
+
+                    dst_index = node.all_input_nodes.index(input_node)
+                    for i in range(n_edges_for_input_node):
+                        if connectivity_dict.get(input_node):
+                            connectivity_dict[input_node].append((node, dst_index))
+                        else:
+                            connectivity_dict[input_node] = [(node, dst_index)]
+                        dst_index += 1
+    for node in model.graph.nodes:
+        out_nodes = connectivity_dict.get(node)
+        if out_nodes:
+            for (out_node, dst_index) in out_nodes:
+                edges.append(
+                    Edge(fx_node_2_graph_node[node], fx_node_2_graph_node[out_node], src_index, dst_index))
+
+    return edges
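
Illustrative usage sketch (not part of the released package): the two builders above expect an FX-traced GraphModule whose nodes already carry type and shape metadata, a dictionary of the model's named modules, and a tensor-to-numpy converter. The wiring below is an assumption based only on the signatures shown; torch.fx.passes.shape_prop.ShapeProp is the standard pass that fills node.meta with the 'type'/'tensor_meta' entries the shape extraction reads, though MCT's actual reader may prepare the graph differently.

import torch
from torch.fx import symbolic_trace
from torch.fx.passes.shape_prop import ShapeProp

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
fx_model = symbolic_trace(model)                            # build the FX GraphModule
ShapeProp(fx_model).propagate(torch.randn(1, 3, 32, 32))    # populate node.meta with type/shape info
module_dict = dict(fx_model.named_modules())                # maps node.target -> nn.Module
to_numpy = lambda t: t.detach().cpu().numpy()               # converter passed to the builders
nodes, inputs, outputs, fx2graph = nodes_builder(fx_model, module_dict, to_numpy)
edges = edges_builder(fx_model, fx2graph)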
@@ -216,7 +216,7 @@ def _set_final_resource_utilization(graph: Graph,
         # No relevant nodes have been quantized with affect on the given target - since we only consider
         # in the model's final size the quantized layers size, this means that the final size for this target
         # is zero.
-        Logger.warning(f"No relevant quantized layers for the ru target {ru_target} were found, the recorded"
+        Logger.warning(f"No relevant quantized layers for the ru target {ru_target} were found, the recorded "
                        f"final ru for this target would be 0.")
         final_ru_dict[ru_target] = 0
 
@@ -148,6 +148,6 @@ class OperationsToLayers:
         qco_by_opset_name = _current_tpc.get().tp_model.get_config_options_by_operators_set(ops2layers.name)
         if layer in existing_layers:
             Logger.critical(f'Found layer {layer.__name__} in more than one '
-
+                            f'OperatorsSet')  # pragma: no cover
         else:
             existing_layers.update({layer: qco_by_opset_name})