mct-nightly 2.2.0.20241127.529.tar.gz → 2.2.0.20241129.526.tar.gz
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registry.
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/PKG-INFO +25 -34
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/README.md +24 -33
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/PKG-INFO +25 -34
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/SOURCES.txt +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/apply_activation_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/data_util.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/statistics_correction/keras_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_linear.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/statistics_correction/pytorch_compute_activation_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/gradual_activation_quantization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/common/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/common/util.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/setup.py +0 -0
{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.20241127.529
+Version: 2.2.0.20241129.526
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -56,9 +56,9 @@ Description: <div align="center" markdown="1">
 
 Quantization Method | Complexity | Computational Cost | API | Tutorial
 -------------------- | -----------|--------------------|---------|--------
-PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/
-GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/
-QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/
+PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb"><img src="https://img.shields.io/badge/Pytorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_gradient_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_gradient_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/PyTorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
 
 </p>
 </div>
@@ -66,9 +66,9 @@ Description: <div align="center" markdown="1">
 For each flow, **Quantization core** utilizes various algorithms and hyper-parameters for optimal [hardware-aware](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md) quantization results.
 For further details, please see [Supported features and algorithms](#high-level-features-and-techniques).
 
-
-
-
+**Required input**: Floating point model - 32bit model in either .pt or .keras format
+
+**Optional input**: Representative dataset - can be either provided by the user, or generated utilizing the [Data Generation](#data-generation-) capability
 
 <div align="center">
 <p align="center">
@@ -101,13 +101,13 @@ Description: <div align="center" markdown="1">
 The specifications of the method are detailed in the paper: _"**Data Generation for Hardware-Friendly Post-Training Quantization**"_ [5].
 __________________________________________________________________________________________________________
 ### Structured Pruning [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_pruning_mnist.ipynb)
-Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/
+Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_pruning_experimental.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_pruning_experimental.html)).
 __________________________________________________________________________________________________________
 ### **Debugging and Visualization**
 **🎛️ Network Editor (Modify Quantization Configurations)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_network_editor.ipynb).
-Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor
+Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor.
 
-**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/
+**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/guidelines/visualization.html).
 
 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
 __________________________________________________________________________________________________________
@@ -117,15 +117,15 @@ Description: <div align="center" markdown="1">
 More details on how to use EPTQ via MCT can be found in the [GPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).
 
 ## <div align="center">Resources</div>
-* [User Guide](https://sony.github.io/model_optimization/
+* [User Guide](https://sony.github.io/model_optimization/index.html) contains detailed information about MCT and guides you from installation through optimizing models for your edge AI applications.
 
-* MCT's [API Docs](https://sony.github.io/model_optimization/
+* MCT's [API Docs](https://sony.github.io/model_optimization/api/api_docs/) is separated per quantization methods:
 
-* [Post-training quantization](https://sony.github.io/model_optimization/
-* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/
-* [Quantization-aware training](https://sony.github.io/model_optimization/
+* [Post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#ptq) | PTQ API docs
+* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#gptq) | GPTQ API docs
+* [Quantization-aware training](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | QAT API docs
 
-* [Debug](https://sony.github.io/model_optimization/
+* [Debug](https://sony.github.io/model_optimization/guidelines/visualization.html) – modify optimization process or generate an explainable report
 
 * [Release notes](https://github.com/sony/model_optimization/releases)
 
@@ -159,25 +159,15 @@ Description: <div align="center" markdown="1">
 <img src="/docsrc/images/PoseEst.png" width="200">
 <img src="/docsrc/images/ObjDet.png" width="200">
 
-### Pytorch
-We quantized classification networks from the torchvision library.
-In the following table we present the ImageNet validation results for these models:
-
-| Network Name | Float Accuracy | 8Bit Accuracy | Data-Free 8Bit Accuracy |
-|---------------------------|-----------------|-----------------|-------------------------|
-| MobileNet V2 [3] | 71.886 | 71.444 |71.29|
-| ResNet-18 [3] | 69.86 | 69.63 |69.53|
-| SqueezeNet 1.1 [3] | 58.128 | 57.678 ||
-
-### Keras
 MCT can quantize an existing 32-bit floating-point model to an 8-bit fixed-point (or less) model without compromising accuracy.
-Below is a graph of [MobileNetV2](https://
-single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.
+Below is a graph of [MobileNetV2](https://pytorch.org/vision/main/models/generated/torchvision.models.mobilenet_v2.html) accuracy on ImageNet vs average bit-width of weights (X-axis), using **single-precision** quantization, **mixed-precision** quantization, and mixed-precision quantization with GPTQ.
 
-<
+<p align="center">
+<img src="/docsrc/images/torch_mobilenetv2.png" width="800">
 
 For more results, please see [1]
 
+
 ### Pruning Results
 
 Results for applying pruning to reduce the parameters of the following models by 50%:
@@ -189,19 +179,20 @@ Description: <div align="center" markdown="1">
 
 ## <div align="center">Troubleshooting and Community</div>
 
-If you encountered large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
-for common pitfalls and some tools to improve quantized model's accuracy.
+If you encountered a large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
+for common pitfalls and some tools to improve the quantized model's accuracy.
 
 Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md) for common issues.
 
-You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under [discussions section](https://github.com/sony/model_optimization/discussions).
+You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under the [discussions section](https://github.com/sony/model_optimization/discussions).
 
 
 ## <div align="center">Contributions</div>
-MCT
+We'd love your input! MCT would not be possible without help from our community, and welcomes contributions from anyone!
 
 *Checkout our [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md) for more details.
 
+Thank you 🙏 to all our contributors!
 
 ## <div align="center">License</div>
 MCT is licensed under Apache License Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
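For orientation, the PTQ table row updated in the diff above links to the `pytorch_post_training_quantization` and `keras_post_training_quantization` facades. The snippet below is a minimal, hedged sketch of the PyTorch flow, not code shipped in this package: the MobileNetV2 model, the random calibration batches, and the reliance on default arguments are illustrative assumptions.

```python
import torch
import torchvision
import model_compression_toolkit as mct

# Floating-point model to quantize (illustrative choice; any torch.nn.Module should do).
float_model = torchvision.models.mobilenet_v2(weights='DEFAULT')

def representative_data_gen():
    # Calibration batches; random tensors stand in for real preprocessed images here.
    for _ in range(10):
        yield [torch.randn(1, 3, 224, 224)]

# PTQ facade referenced by the updated docs link (pytorch_post_training_quantization).
quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model,
    representative_data_gen)
```

The equivalent Keras flow would presumably swap in `keras_post_training_quantization` with a Keras model and the same style of representative dataset generator; the linked API docs remain the reference for exact signatures.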
{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/README.md
RENAMED
@@ -50,9 +50,9 @@ MCT supports various quantization methods as appears below.
 
 Quantization Method | Complexity | Computational Cost | API | Tutorial
 -------------------- | -----------|--------------------|---------|--------
-PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/
-GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/
-QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/
+PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb"><img src="https://img.shields.io/badge/Pytorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_gradient_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_gradient_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/PyTorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
 
 </p>
 </div>
@@ -60,9 +60,9 @@ QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](
 For each flow, **Quantization core** utilizes various algorithms and hyper-parameters for optimal [hardware-aware](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md) quantization results.
 For further details, please see [Supported features and algorithms](#high-level-features-and-techniques).
 
-Required input
-
-
+**Required input**: Floating point model - 32bit model in either .pt or .keras format
+
+**Optional input**: Representative dataset - can be either provided by the user, or generated utilizing the [Data Generation](#data-generation-) capability
 
 <div align="center">
 <p align="center">
@@ -95,13 +95,13 @@ Generates synthetic images based on the statistics stored in the model's batch n
 The specifications of the method are detailed in the paper: _"**Data Generation for Hardware-Friendly Post-Training Quantization**"_ [5].
 __________________________________________________________________________________________________________
 ### Structured Pruning [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_pruning_mnist.ipynb)
-Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/
+Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_pruning_experimental.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_pruning_experimental.html)).
 __________________________________________________________________________________________________________
 ### **Debugging and Visualization**
 **🎛️ Network Editor (Modify Quantization Configurations)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_network_editor.ipynb).
-Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor
+Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor.
 
-**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/
+**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/guidelines/visualization.html).
 
 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
 __________________________________________________________________________________________________________
@@ -111,15 +111,15 @@ The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhance
 More details on how to use EPTQ via MCT can be found in the [GPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).
 
 ## <div align="center">Resources</div>
-* [User Guide](https://sony.github.io/model_optimization/
+* [User Guide](https://sony.github.io/model_optimization/index.html) contains detailed information about MCT and guides you from installation through optimizing models for your edge AI applications.
 
-* MCT's [API Docs](https://sony.github.io/model_optimization/
+* MCT's [API Docs](https://sony.github.io/model_optimization/api/api_docs/) is separated per quantization methods:
 
-* [Post-training quantization](https://sony.github.io/model_optimization/
-* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/
-* [Quantization-aware training](https://sony.github.io/model_optimization/
+* [Post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#ptq) | PTQ API docs
+* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#gptq) | GPTQ API docs
+* [Quantization-aware training](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | QAT API docs
 
-* [Debug](https://sony.github.io/model_optimization/
+* [Debug](https://sony.github.io/model_optimization/guidelines/visualization.html) – modify optimization process or generate an explainable report
 
 * [Release notes](https://github.com/sony/model_optimization/releases)
 
@@ -153,25 +153,15 @@ Currently, MCT is being tested on various Python, Pytorch and TensorFlow version
 <img src="/docsrc/images/PoseEst.png" width="200">
 <img src="/docsrc/images/ObjDet.png" width="200">
 
-### Pytorch
-We quantized classification networks from the torchvision library.
-In the following table we present the ImageNet validation results for these models:
-
-| Network Name | Float Accuracy | 8Bit Accuracy | Data-Free 8Bit Accuracy |
-|---------------------------|-----------------|-----------------|-------------------------|
-| MobileNet V2 [3] | 71.886 | 71.444 |71.29|
-| ResNet-18 [3] | 69.86 | 69.63 |69.53|
-| SqueezeNet 1.1 [3] | 58.128 | 57.678 ||
-
-### Keras
 MCT can quantize an existing 32-bit floating-point model to an 8-bit fixed-point (or less) model without compromising accuracy.
-Below is a graph of [MobileNetV2](https://
-single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.
+Below is a graph of [MobileNetV2](https://pytorch.org/vision/main/models/generated/torchvision.models.mobilenet_v2.html) accuracy on ImageNet vs average bit-width of weights (X-axis), using **single-precision** quantization, **mixed-precision** quantization, and mixed-precision quantization with GPTQ.
 
-<
+<p align="center">
+<img src="/docsrc/images/torch_mobilenetv2.png" width="800">
 
 For more results, please see [1]
 
+
 ### Pruning Results
 
 Results for applying pruning to reduce the parameters of the following models by 50%:
@@ -183,19 +173,20 @@ Results for applying pruning to reduce the parameters of the following models by
 
 ## <div align="center">Troubleshooting and Community</div>
 
-If you encountered large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
-for common pitfalls and some tools to improve quantized model's accuracy.
+If you encountered a large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
+for common pitfalls and some tools to improve the quantized model's accuracy.
 
 Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md) for common issues.
 
-You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under [discussions section](https://github.com/sony/model_optimization/discussions).
+You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under the [discussions section](https://github.com/sony/model_optimization/discussions).
 
 
 ## <div align="center">Contributions</div>
-MCT
+We'd love your input! MCT would not be possible without help from our community, and welcomes contributions from anyone!
 
 *Checkout our [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md) for more details.
 
+Thank you 🙏 to all our contributors!
 
 ## <div align="center">License</div>
 MCT is licensed under Apache License Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
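Similarly, the GPTQ row in the table above points to `pytorch_gradient_post_training_quantization`. Below is a hedged sketch of that flow under stated assumptions: the `get_pytorch_gptq_config` helper, its `n_epochs` argument, and the toy data generator are assumptions that may differ between MCT versions, so the linked GPTQ API docs remain the reference.

```python
import torch
import torchvision
import model_compression_toolkit as mct

# Floating-point model to fine-tune and quantize (illustrative choice).
float_model = torchvision.models.mobilenet_v2(weights='DEFAULT')

def representative_data_gen():
    # Random stand-in calibration data; real samples should be used in practice.
    for _ in range(10):
        yield [torch.randn(1, 3, 224, 224)]

# GPTQ training configuration (assumed helper name and argument; see the GPTQ API docs).
gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)

# Gradient-based PTQ facade referenced by the updated docs link.
quantized_model, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization(
    float_model,
    representative_data_gen,
    gptq_config=gptq_config)
```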
{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: mct-nightly
|
3
|
-
Version: 2.2.0.
|
3
|
+
Version: 2.2.0.20241129.526
|
4
4
|
Summary: A Model Compression Toolkit for neural networks
|
5
5
|
Home-page: UNKNOWN
|
6
6
|
License: UNKNOWN
|
@@ -56,9 +56,9 @@ Description: <div align="center" markdown="1">

 Quantization Method | Complexity | Computational Cost | API | Tutorial
 -------------------- | -----------|--------------------|---------|--------
-PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/
-GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/
-QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/
+PTQ (Post Training Quantization) | Low | Low (~1-10 CPU minutes) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb"><img src="https://img.shields.io/badge/Pytorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+GPTQ (parameters fine-tuning using gradients) | Moderate | Moderate (~1-3 GPU hours) | [PyTorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_gradient_post_training_quantization.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_gradient_post_training_quantization.html) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/PyTorch-green"/></a> <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_gptq.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>
+QAT (Quantization Aware Training) | High | High (~12-36 GPU hours) | [QAT API](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | <a href="https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb"><img src="https://img.shields.io/badge/Keras-green"/></a>

 </p>
 </div>
@@ -66,9 +66,9 @@ Description: <div align="center" markdown="1">
 For each flow, **Quantization core** utilizes various algorithms and hyper-parameters for optimal [hardware-aware](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md) quantization results.
 For further details, please see [Supported features and algorithms](#high-level-features-and-techniques).

-
-
-
+**Required input**: Floating point model - 32bit model in either .pt or .keras format
+
+**Optional input**: Representative dataset - can be either provided by the user, or generated utilizing the [Data Generation](#data-generation-) capability

 <div align="center">
 <p align="center">
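To make the flow described in the hunk above concrete (a floating-point model as required input, plus an optional representative dataset), here is a minimal PTQ sketch in the spirit of the PyTorch API linked in the table. The model choice, batch shape, and number of calibration iterations are placeholder assumptions, and the call follows the pytorch_post_training_quantization entry point referenced in this diff; it is a sketch, not code taken from the package.

```python
# Minimal sketch (not part of this package diff): post-training quantization of a
# floating-point PyTorch model with MCT, using a stand-in representative dataset.
import torch
from torchvision.models import mobilenet_v2
import model_compression_toolkit as mct

float_model = mobilenet_v2()  # 32-bit floating-point model; load real weights in practice

def representative_data_gen():
    # Yield a few input batches that roughly match the deployment data distribution.
    # Random tensors are used here purely as a placeholder.
    for _ in range(10):
        yield [torch.randn(1, 3, 224, 224)]

quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
    float_model, representative_data_gen)
```

The Keras flow is analogous via keras_post_training_quantization with a .keras model, per the links in the table above.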
@@ -101,13 +101,13 @@ Description: <div align="center" markdown="1">
 The specifications of the method are detailed in the paper: _"**Data Generation for Hardware-Friendly Post-Training Quantization**"_ [5].
 __________________________________________________________________________________________________________
 ### Structured Pruning [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_pruning_mnist.ipynb)
-Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/
+Reduces model size/complexity and ensures better channels utilization by removing redundant input channels from layers and reconstruction of layer weights. Read more ([Pytorch API](https://sony.github.io/model_optimization/api/api_docs/methods/pytorch_pruning_experimental.html) / [Keras API](https://sony.github.io/model_optimization/api/api_docs/methods/keras_pruning_experimental.html)).
 __________________________________________________________________________________________________________
 ### **Debugging and Visualization**
 **🎛️ Network Editor (Modify Quantization Configurations)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/keras/example_keras_network_editor.ipynb).
-Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor
+Modify your model's quantization configuration for specific layers or apply a custom edit rule (e.g adjust layer's bit-width) using MCT’s network editor.

-**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/
+**🖥️ Visualization**. Observe useful information for troubleshooting the quantized model's performance using TensorBoard. [Read more](https://sony.github.io/model_optimization/guidelines/visualization.html).

 **🔑 XQuant (Explainable Quantization)** [](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_xquant.ipynb). Get valuable insights regarding the quality and success of the quantization process of your model. The report includes histograms and similarity metrics between the original float model and the quantized model in key points of the model. The report can be visualized using TensorBoard.
 __________________________________________________________________________________________________________
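The hunk above restores the full links for the structured-pruning API, so a rough usage sketch may help. It assumes mct.core.ResourceUtilization and the pytorch_pruning_experimental entry point behave as described in the linked API docs; the 50% weights-memory target and the model are arbitrary illustrations, and the exact names are assumptions rather than code from this package.

```python
# Hypothetical sketch of MCT structured pruning (names follow the API docs linked above).
import torch
from torchvision.models import resnet18
import model_compression_toolkit as mct

model = resnet18()

def representative_data_gen():
    for _ in range(5):
        yield [torch.randn(1, 3, 224, 224)]

# Target roughly half of the dense model's weight memory (float32 => 4 bytes per parameter).
dense_params = sum(p.numel() for p in model.parameters())
target_ru = mct.core.ResourceUtilization(weights_memory=dense_params * 4 * 0.5)

pruned_model, pruning_info = mct.pruning.pytorch_pruning_experimental(
    model, target_ru, representative_data_gen)
```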
@@ -117,15 +117,15 @@ Description: <div align="center" markdown="1">
 More details on how to use EPTQ via MCT can be found in the [GPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).

 ## <div align="center">Resources</div>
-* [User Guide](https://sony.github.io/model_optimization/
+* [User Guide](https://sony.github.io/model_optimization/index.html) contains detailed information about MCT and guides you from installation through optimizing models for your edge AI applications.

-* MCT's [API Docs](https://sony.github.io/model_optimization/
+* MCT's [API Docs](https://sony.github.io/model_optimization/api/api_docs/) is separated per quantization methods:

-* [Post-training quantization](https://sony.github.io/model_optimization/
-* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/
-* [Quantization-aware training](https://sony.github.io/model_optimization/
+* [Post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#ptq) | PTQ API docs
+* [Gradient-based post-training quantization](https://sony.github.io/model_optimization/api/api_docs/index.html#gptq) | GPTQ API docs
+* [Quantization-aware training](https://sony.github.io/model_optimization/api/api_docs/index.html#qat) | QAT API docs

-* [Debug](https://sony.github.io/model_optimization/
+* [Debug](https://sony.github.io/model_optimization/guidelines/visualization.html) – modify optimization process or generate an explainable report

 * [Release notes](https://github.com/sony/model_optimization/releases)

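Since the resources hunk above begins with the EPTQ/GPTQ guidelines, a hedged sketch of the gradient-based flow is included here for context. The get_pytorch_gptq_config helper and its n_epochs argument are taken from the public API docs linked in this diff, but the exact names and defaults should be treated as assumptions; the epoch count and model are placeholders.

```python
# Hypothetical GPTQ sketch (gradient-based post-training quantization); not taken from this diff.
import torch
from torchvision.models import mobilenet_v2
import model_compression_toolkit as mct

float_model = mobilenet_v2()

def representative_data_gen():
    for _ in range(10):
        yield [torch.randn(1, 3, 224, 224)]

# A short fine-tuning schedule; real runs typically use more epochs (see the GPTQ guidelines).
gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=5)

quantized_model, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization(
    float_model, representative_data_gen, gptq_config=gptq_config)
```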
@@ -159,25 +159,15 @@ Description: <div align="center" markdown="1">
 <img src="/docsrc/images/PoseEst.png" width="200">
 <img src="/docsrc/images/ObjDet.png" width="200">

-### Pytorch
-We quantized classification networks from the torchvision library.
-In the following table we present the ImageNet validation results for these models:
-
-| Network Name | Float Accuracy | 8Bit Accuracy | Data-Free 8Bit Accuracy |
-|---------------------------|-----------------|-----------------|-------------------------|
-| MobileNet V2 [3] | 71.886 | 71.444 |71.29|
-| ResNet-18 [3] | 69.86 | 69.63 |69.53|
-| SqueezeNet 1.1 [3] | 58.128 | 57.678 ||
-
-### Keras
 MCT can quantize an existing 32-bit floating-point model to an 8-bit fixed-point (or less) model without compromising accuracy.
-Below is a graph of [MobileNetV2](https://
-single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.
+Below is a graph of [MobileNetV2](https://pytorch.org/vision/main/models/generated/torchvision.models.mobilenet_v2.html) accuracy on ImageNet vs average bit-width of weights (X-axis), using **single-precision** quantization, **mixed-precision** quantization, and mixed-precision quantization with GPTQ.

-<
+<p align="center">
+<img src="/docsrc/images/torch_mobilenetv2.png" width="800">

 For more results, please see [1]

+
 ### Pruning Results

 Results for applying pruning to reduce the parameters of the following models by 50%:
@@ -189,19 +179,20 @@ Description: <div align="center" markdown="1">

 ## <div align="center">Troubleshooting and Community</div>

-If you encountered large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
-for common pitfalls and some tools to improve quantized model's accuracy.
+If you encountered a large accuracy degradation with MCT, check out the [Quantization Troubleshooting](https://github.com/sony/model_optimization/tree/main/quantization_troubleshooting.md)
+for common pitfalls and some tools to improve the quantized model's accuracy.

 Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md) for common issues.

-You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under [discussions section](https://github.com/sony/model_optimization/discussions).
+You are welcome to ask questions and get support on our [issues section](https://github.com/sony/model_optimization/issues) and manage community discussions under the [discussions section](https://github.com/sony/model_optimization/discussions).


 ## <div align="center">Contributions</div>
-MCT
+We'd love your input! MCT would not be possible without help from our community, and welcomes contributions from anyone!

 *Checkout our [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md) for more details.

+Thank you 🙏 to all our contributors!

 ## <div align="center">License</div>
 MCT is licensed under Apache License Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/model_compression_toolkit/__init__.py
RENAMED
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

-__version__ = "2.2.0.
+__version__ = "2.2.0.20241129.000526"
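The __init__.py hunk above re-exports keras_load_quantized_model at the package top level. Below is a brief, hedged example of how that loader is typically used; the file path is a placeholder, and saving via the regular Keras save API is an assumption about the surrounding workflow rather than something shown in this diff.

```python
# Hypothetical usage of the loader re-exported in the hunk above.
import model_compression_toolkit as mct

# Assume `quantized_model` came from one of the quantization flows shown earlier
# and was saved with the regular Keras API (placeholder path):
#     quantized_model.save('quantized_model.keras')

# MCT's loader restores the model together with its custom quantization wrappers:
restored_model = mct.keras_load_quantized_model('quantized_model.keras')
```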
{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/SOURCES.txt
RENAMED
File without changes

{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/requires.txt
RENAMED
File without changes

{mct-nightly-2.2.0.20241127.529 → mct-nightly-2.2.0.20241129.526}/mct_nightly.egg-info/top_level.txt
RENAMED
File without changes

All other renamed files: no content changes.