mct-nightly 2.2.0.20240902.511.tar.gz → 2.2.0.20240904.449.tar.gz
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/PKG-INFO +6 -6
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/README.md +5 -5
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/PKG-INFO +6 -6
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/SOURCES.txt +11 -2
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/__init__.py +1 -1
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +1 -2
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/__init__.py +2 -2
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/common/qat_config.py +1 -19
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantization_facade.py +1 -1
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +1 -1
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +1 -1
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +1 -1
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +1 -1
- mct-nightly-2.2.0.20240902.511/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py → mct-nightly-2.2.0.20240904.449/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +4 -13
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +6 -116
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +12 -122
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +8 -7
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +6 -84
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +6 -85
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/__init__.py +9 -3
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +9 -8
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/common/training_method.py +31 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +2 -2
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +2 -2
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +19 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +22 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +14 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +111 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +106 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +14 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +108 -0
- mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +105 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +7 -14
- {mct-nightly-2.2.0.20240902.511/model_compression_toolkit/qat/pytorch/quantizer → mct-nightly-2.2.0.20240904.449/model_compression_toolkit/trainable_infrastructure/pytorch}/quantizer_utils.py +79 -2
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/LICENSE.md +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/enums.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/defaultdict.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/logger.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/metadata.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/pruning/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/verify_packages.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/constants.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/setup.cfg +0 -0
- {mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/setup.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.
+Version: 2.2.0.20240904.449
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -57,11 +57,11 @@ Description: # Model Compression Toolkit (MCT)
 Currently, MCT is being tested on various Python, Pytorch and TensorFlow versions:
 
 
-| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 |
-
-| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) |
-| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) |
-| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) |
+| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 | PyTorch 2.4 |
+|-------------|-------------|-------------|-------------|-------------|
+| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch24.yml) |
+| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch24.yml) |
+| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch24.yml) |
 
 
 
@@ -51,11 +51,11 @@ for hands-on learning. For example:
 Currently, MCT is being tested on various Python, Pytorch and TensorFlow versions:
 
 
-| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 |
-
-| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) |
-| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) |
-| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) |
+| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 | PyTorch 2.4 |
+|-------------|-------------|-------------|-------------|-------------|
+| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch24.yml) |
+| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch24.yml) |
+| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch24.yml) |
 
 
 
{mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.
+Version: 2.2.0.20240904.449
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -57,11 +57,11 @@ Description: # Model Compression Toolkit (MCT)
 Currently, MCT is being tested on various Python, Pytorch and TensorFlow versions:
 
 
-| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 |
-
-| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) |
-| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) |
-| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) |
+| | PyTorch 2.1 | PyTorch 2.2 | PyTorch 2.3 | PyTorch 2.4 |
+|-------------|-------------|-------------|-------------|-------------|
+| Python 3.9 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python39_pytorch24.yml) |
+| Python 3.10 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python310_pytorch24.yml) |
+| Python 3.11 | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch21.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch22.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch23.yml) | [](https://github.com/sony/model_optimization/actions/workflows/run_tests_python311_pytorch24.yml) |
 
 
 
{mct-nightly-2.2.0.20240902.511 → mct-nightly-2.2.0.20240904.449}/mct_nightly.egg-info/SOURCES.txt
RENAMED
@@ -421,9 +421,8 @@ model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py
 model_compression_toolkit/qat/pytorch/__init__.py
 model_compression_toolkit/qat/pytorch/quantization_facade.py
 model_compression_toolkit/qat/pytorch/quantizer/__init__.py
-model_compression_toolkit/qat/pytorch/quantizer/
+model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py
 model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py
-model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py
 model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py
 model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py
 model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py
@@ -502,6 +501,7 @@ model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.p
 model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
 model_compression_toolkit/trainable_infrastructure/common/quant_utils.py
 model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
+model_compression_toolkit/trainable_infrastructure/common/training_method.py
 model_compression_toolkit/trainable_infrastructure/keras/__init__.py
 model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py
 model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
@@ -510,6 +510,15 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
+model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py
+model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py
 model_compression_toolkit/xquant/__init__.py
 model_compression_toolkit/xquant/common/__init__.py
 model_compression_toolkit/xquant/common/constants.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.
+__version__ = "2.2.0.20240904.000449"
@@ -27,7 +27,6 @@ from mct_quantizers.pytorch.quantizers import BasePyTorchInferableQuantizer
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.trainable_infrastructure.common.get_quantizer_config import \
     get_trainable_quantizer_weights_config
-from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_quantizer import BasePytorchQATTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.get_quantizers import \
     get_trainable_quantizer_class
 
@@ -35,7 +34,7 @@ from model_compression_toolkit.trainable_infrastructure.common.get_quantizers im
 def quantization_builder(n: common.BaseNode,
                          gptq_config: GradientPTQConfig,
                          kernel_attr: str = None
-                         ) -> Tuple[Dict[str,
+                         ) -> Tuple[Dict[str, BasePytorchGPTQTrainableQuantizer], List[BasePyTorchInferableQuantizer]]:
     """
     Build quantizers for a node according to its quantization configuration and
     a global NoOpQuantizeConfig object.
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-from model_compression_toolkit.qat.common.qat_config import QATConfig
+from model_compression_toolkit.qat.common.qat_config import QATConfig
 
 from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init_experimental, keras_quantization_aware_training_finalize_experimental
-from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init_experimental, pytorch_quantization_aware_training_finalize_experimental
+from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init_experimental, pytorch_quantization_aware_training_finalize_experimental
@@ -14,10 +14,9 @@
 # ==============================================================================
 
 from typing import Dict
-from enum import Enum
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
 
 def is_qat_applicable(node: common.BaseNode,
@@ -38,23 +37,6 @@ def is_qat_applicable(node: common.BaseNode,
            or node.is_activation_quantization_enabled()
 
 
-
-class TrainingMethod(Enum):
-    """
-    An enum for selecting a QAT training method
-
-    STE - Standard straight-through estimator. Includes PowerOfTwo, symmetric & uniform quantizers
-
-    DQA - DNN Quantization with Attention. Includes a smooth quantization introduces by DQA method
-
-    LSQ - Learned Step size Quantization. Includes PowerOfTwo, symmetric & uniform quantizers: https://arxiv.org/pdf/1902.08153.pdf
-
-    """
-    STE = "STE",
-    DQA = "DQA",
-    LSQ = "LSQ"
-
-
 class QATConfig:
     """
     QAT configuration class.
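Note: the hunk above drops the `TrainingMethod` enum from `qat_config.py`; the import added earlier in the same file (`from model_compression_toolkit.trainable_infrastructure import TrainingMethod`) and the new `training_method.py` entry in SOURCES.txt show where callers now get it. As a rough, hypothetical sketch of the relocated enum, reconstructed only from the members visible in the removed code (the actual contents of the new module are not part of this diff):

```python
# Hypothetical sketch, based solely on the members visible in the removed code;
# the real module under model_compression_toolkit.trainable_infrastructure may differ.
from enum import Enum


class TrainingMethod(Enum):
    """Selects a QAT training method: STE, DQA or LSQ."""
    STE = "STE"
    DQA = "DQA"
    LSQ = "LSQ"

# New-style import used by the updated sources in this diff:
# from model_compression_toolkit.trainable_infrastructure import TrainingMethod
```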
@@ -24,7 +24,6 @@ from model_compression_toolkit.core.common.mixed_precision.resource_utilization_
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
 from mct_quantizers import KerasActivationQuantizationHolder
-from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
@@ -34,6 +33,7 @@ if FOUND_TF:
     from tensorflow.keras.layers import Layer
     from tensorflow.keras.models import Model
 
+    from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
@@ -20,7 +20,7 @@ import tensorflow as tf
 from tensorflow.python.framework.tensor_shape import TensorShape
 from model_compression_toolkit.constants import SIGNED
 
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
@@ -18,7 +18,7 @@ from tensorflow.python.framework.tensor_shape import TensorShape
 from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
 from mct_quantizers import mark_quantizer, QuantizationMethod, QuantizationTarget
 from mct_quantizers.keras.quantizers import \
@@ -21,7 +21,7 @@ from tensorflow.python.framework.tensor_shape import TensorShape
 from model_compression_toolkit.constants import SIGNED
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
 
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
@@ -18,7 +18,7 @@ from tensorflow.python.framework.tensor_shape import TensorShape
 from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
 from mct_quantizers import mark_quantizer, QuantizationMethod, QuantizationTarget
 from mct_quantizers.keras.quantizers import \
@@ -24,23 +24,14 @@ from model_compression_toolkit.trainable_infrastructure.pytorch.base_pytorch_qua
 
 if FOUND_TORCH:
 
-    class
+    class BasePytorchQATWeightTrainableQuantizer(BasePytorchTrainableQuantizer):
         """
-        A base class for trainable
+        A base class for trainable PyTorch weights quantizer for QAT.
         """
-
-        def __init__(self,
-                     quantization_config: Union[TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig]):
-            """
-            Initializes BasePytorchQATTrainableQuantizer object.
-
-            Args:
-                quantization_config: quantizer config class contains all the information about a quantizer configuration.
-            """
-            super().__init__(quantization_config)
+        pass
 
 else: # pragma: no cover
-    class
+    class BasePytorchQATWeightTrainableQuantizer(BasePytorchTrainableQuantizer):
         def __init__(self,
                      quantization_config: Union[TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig]):
             super().__init__(quantization_config)
@@ -18,56 +18,27 @@ import numpy as np
 import torch
 import torch.nn as nn
 
-from model_compression_toolkit.qat import TrainingMethod
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from mct_quantizers import PytorchQuantizationWrapper
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C
-from model_compression_toolkit.qat.pytorch.quantizer.
+from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer
 from mct_quantizers.common.base_inferable_quantizer import mark_quantizer, QuantizationTarget
 
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
+from model_compression_toolkit.trainable_infrastructure.pytorch.quantizer_utils import symmetric_lsq_quantizer
 from mct_quantizers.pytorch.quantizers import \
-    WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer
-    ActivationSymmetricInferableQuantizer
+    WeightsPOTInferableQuantizer, WeightsSymmetricInferableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
-    TrainableQuantizerWeightsConfig
+    TrainableQuantizerWeightsConfig
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
 
 
-def symmetric_lsq_quantizer(x: nn.Parameter,
-                            thresholds: nn.Parameter,
-                            num_bits: int,
-                            sign: bool,
-                            min_int: int,
-                            max_int: int,
-                            scale_factor: float) -> Union[nn.Parameter, torch.Tensor]:
-    """
-    Symmetric quantizer according to LSQ algorithm: https://arxiv.org/pdf/1902.08153.pdf
-    Args:
-        x: input to quantize
-        thresholds: thresholds of quantization levels
-        num_bits: number of bits for quantization
-        sign: whether x is signed or not
-        min_int: min clipping integer value
-        max_int: max clipping integer value
-        scale_factor: grad scale of LSQ algorithm
-    Returns:
-        A quantized tensor
-    """
-    delta = thresholds / (2 ** (num_bits - int(sign)))
-    delta_scaled = grad_scale(delta, scale_factor)
-    rounded = ste_round(x / delta_scaled)
-    clipped = torch.clip(rounded, min=min_int, max=max_int)
-    quantized = delta_scaled * clipped
-    return quantized
-
-
 @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
                 identifier=TrainingMethod.LSQ)
-class LSQWeightQATQuantizer(
+class LSQWeightQATQuantizer(BasePytorchQATWeightTrainableQuantizer):
     """
     Trainable constrained quantizer to quantize layer's weights.
     """
@@ -145,84 +116,3 @@ class LSQWeightQATQuantizer(BasePytorchQATTrainableQuantizer):
                                               threshold=threshold_values.tolist(),
                                               per_channel=self.quantization_config.weights_per_channel_threshold,
                                               channel_axis=self.quantization_config.weights_channels_axis)
-
-
-
-@mark_quantizer(quantization_target=QuantizationTarget.Activation,
-                quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
-                identifier=TrainingMethod.LSQ)
-class LSQActivationQATQuantizer(BasePytorchQATTrainableQuantizer):
-    """
-    Trainable constrained quantizer to quantize layer activations.
-    """
-
-    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
-        """
-        Initialize a LSQActivationQATQuantizer object with parameters to use
-        for symmetric or power of two quantization.
-
-        Args:
-            quantization_config: trainable quantizer config class
-        """
-        super().__init__(quantization_config)
-        self.power_of_two = quantization_config.activation_quantization_method == QuantizationMethod.POWER_OF_TWO
-        self.sign = quantization_config.activation_quantization_params['is_signed']
-        self.threshold_values = np.array([quantization_config.activation_quantization_params[C.THRESHOLD]])
-        self.num_bits = quantization_config.activation_n_bits
-        n_pos_bits = self.num_bits - int(self.sign)
-        self.min_int = -int(self.sign) * (2 ** n_pos_bits)
-        self.max_int = (2 ** n_pos_bits) - 1
-
-    def initialize_quantization(self,
-                                tensor_shape: torch.Size,
-                                name: str,
-                                layer: PytorchQuantizationWrapper):
-        """
-        Add quantizer parameters to the quantizer parameters dictionary
-
-        Args:
-            tensor_shape: tensor shape of the quantized tensor.
-            name: Tensor name.
-            layer: Layer to quantize.
-        """
-        layer.register_parameter(name, nn.Parameter(to_torch_tensor(self.threshold_values), requires_grad=True))
-
-        # save the quantizer added parameters for later calculations
-        self.add_quantizer_variable(THRESHOLD_TENSOR, layer.get_parameter(name), VariableGroup.QPARAMS)
-
-    def __call__(self,
-                 inputs: torch.Tensor,
-                 training: bool = True) -> torch.Tensor:
-        """
-        Quantize a tensor.
-        Args:
-            inputs: Input tensor to quantize.
-            training: Whether the graph is in training mode.
-
-        Returns:
-            The quantized tensor.
-        """
-
-        thresholds = self.get_quantizer_variable(THRESHOLD_TENSOR)
-        n_channels = inputs.shape[1]
-        scale_factor = 1.0 / np.sqrt(self.max_int * n_channels)
-        inputs_quantized = symmetric_lsq_quantizer(inputs, thresholds, self.num_bits, self.sign, self.min_int, self.max_int, scale_factor)
-        return inputs_quantized
-
-    def convert2inferable(self) -> Union[ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer]:
-        """
-        Convert quantizer to inferable quantizer.
-
-        Returns:
-            A pytorch inferable quanizer object.
-        """
-        threshold_values = self.get_quantizer_variable(THRESHOLD_TENSOR).cpu().detach().numpy()
-        if self.power_of_two:
-            pot_threshold = np.power(2.0, np.ceil(np.log2(threshold_values)))
-            return ActivationPOTInferableQuantizer(num_bits=self.num_bits,
-                                                   threshold=pot_threshold.tolist(),
-                                                   signed=self.sign)
-        else:
-            return ActivationSymmetricInferableQuantizer(num_bits=self.num_bits,
-                                                         threshold=threshold_values.tolist(),
-                                                         signed=self.sign)
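The removed `symmetric_lsq_quantizer` helper (now imported from `model_compression_toolkit.trainable_infrastructure.pytorch.quantizer_utils` per the updated imports above) is the core LSQ fake-quantization step. Below is a self-contained sketch of that computation; `ste_round` and `grad_scale` are assumed stand-ins for MCT's internal helpers, which are not shown in this diff:

```python
import torch


def ste_round(x: torch.Tensor) -> torch.Tensor:
    # Assumed stand-in: round in the forward pass, identity gradient in the backward pass.
    return (torch.round(x) - x).detach() + x


def grad_scale(x: torch.Tensor, factor: float) -> torch.Tensor:
    # Assumed stand-in: value unchanged in the forward pass, gradient scaled by `factor`.
    return (x - x * factor).detach() + x * factor


def symmetric_lsq_quantizer(x, thresholds, num_bits, sign, min_int, max_int, scale_factor):
    # Same computation as the removed helper: step size derived from the trainable threshold,
    # LSQ gradient scaling, straight-through rounding, then clipping to the integer grid.
    delta = thresholds / (2 ** (num_bits - int(sign)))
    delta_scaled = grad_scale(delta, scale_factor)
    rounded = ste_round(x / delta_scaled)
    clipped = torch.clip(rounded, min=min_int, max=max_int)
    return delta_scaled * clipped


# Example: signed 8-bit symmetric fake quantization with a trainable threshold of 1.0.
x = torch.randn(4, 8)
t = torch.nn.Parameter(torch.tensor(1.0))
x_q = symmetric_lsq_quantizer(x, t, num_bits=8, sign=True, min_int=-128, max_int=127, scale_factor=1.0)
```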
@@ -12,66 +12,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-from typing import Union
 import numpy as np
 import torch
 import torch.nn as nn
 
-from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN
-from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
-
-from model_compression_toolkit.qat import TrainingMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper
-from model_compression_toolkit import constants as C
-
-from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_quantizer import BasePytorchQATTrainableQuantizer
 from mct_quantizers import mark_quantizer
-from model_compression_toolkit.qat.pytorch.quantizer.quantizer_utils import ste_round, grad_scale
-from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from mct_quantizers.pytorch.quantizers import \
-    WeightsUniformInferableQuantizer
-
-
+    WeightsUniformInferableQuantizer
+
+from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN
+from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
+from model_compression_toolkit.trainable_infrastructure import TrainingMethod
+from model_compression_toolkit.trainable_infrastructure.pytorch.quantizer_utils import uniform_lsq_quantizer
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
-from model_compression_toolkit.
+from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
+    TrainableQuantizerWeightsConfig
+from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero
-
-
-
-def uniform_lsq_quantizer(x: nn.Parameter,
-                          min_range: nn.Parameter,
-                          max_range: nn.Parameter,
-                          num_bits: int,
-                          min_int: int,
-                          max_int: int,
-                          scale_factor: float) -> Union[nn.Parameter, torch.Tensor]:
-    """
-    Uniform quantizer according to LSQ algorithm: https://arxiv.org/pdf/1902.08153.pdf
-    Args:
-        x: input to quantize
-        min_range: min range of quantization values
-        max_range: min range of quantization values
-        num_bits: number of bits for quantization
-        min_int: min clipping integer value
-        max_int: max clipping integer value
-        scale_factor: grad scale of LSQ algorithm
-    Returns:
-        A quantized tensor
-    """
-    a, b = adjust_range_to_include_zero(min_range, max_range, num_bits)
-    delta = (b - a) / (2 ** num_bits - 1)
-    delta_scaled = grad_scale(delta, scale_factor)
-    rounded = ste_round((x - a) / delta_scaled)
-    clipped = torch.clip(rounded, min=min_int, max=max_int)
-    quantized = delta_scaled * clipped + a
-    return quantized
+from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer
 
 
 @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.UNIFORM],
                 identifier=TrainingMethod.LSQ)
-class LSQUniformWeightQATQuantizer(
+class LSQUniformWeightQATQuantizer(BasePytorchQATWeightTrainableQuantizer):
     """
     Trainable constrained quantizer to quantize layer's weights.
     """
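This hunk removes the local uniform_lsq_quantizer helper (its body is visible above), imports it from model_compression_toolkit.trainable_infrastructure.pytorch.quantizer_utils instead, and rebases the weight quantizer on BasePytorchQATWeightTrainableQuantizer. A self-contained sketch of the same uniform LSQ computation follows; ste_round and grad_scale are as in the sketch after the previous hunk, and adjust_range is a simplified stand-in for the range-fixing helper (assumption: keep the step size and shift the learned range so zero lands exactly on the grid).

import torch

def ste_round(x):  # round forward, identity gradient (as in the earlier sketch)
    return (torch.round(x) - x).detach() + x

def grad_scale(x, scale):  # identity forward, gradient scaled by `scale`
    return (x - x * scale).detach() + x * scale

def adjust_range(a: torch.Tensor, b: torch.Tensor, num_bits: int):
    # Simplified stand-in: keep the step size, shift (a, b) so 0.0 is exactly representable.
    delta = (b - a) / (2 ** num_bits - 1)
    zero_point = torch.clamp(torch.round(-a / delta), 0, 2 ** num_bits - 1)
    a_adj = -zero_point * delta
    return a_adj, a_adj + delta * (2 ** num_bits - 1)

def uniform_lsq(x, min_range, max_range, num_bits, min_int, max_int, scale_factor):
    # Mirrors the removed helper: fix the range, derive the step size, scale its
    # gradient (LSQ), then round with STE, clip, and map back to the float range.
    a, b = adjust_range(min_range, max_range, num_bits)
    delta = grad_scale((b - a) / (2 ** num_bits - 1), scale_factor)
    return delta * torch.clip(ste_round((x - a) / delta), min_int, max_int) + a

Usage follows the removed __call__ methods: min_range and max_range are trainable nn.Parameters registered on the wrapped layer, and scale_factor is 1/sqrt(max_int * n_channels).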
@@ -145,79 +111,3 @@ class LSQUniformWeightQATQuantizer(BasePytorchQATTrainableQuantizer):
                                                max_range=max_range.tolist(),
                                                per_channel=self.quantization_config.weights_per_channel_threshold,
                                                channel_axis=self.quantization_config.weights_channels_axis)
-
-
-@mark_quantizer(quantization_target=QuantizationTarget.Activation,
-                quantization_method=[QuantizationMethod.UNIFORM],
-                identifier=TrainingMethod.LSQ)
-class LSQUniformActivationQATQuantizer(BasePytorchQATTrainableQuantizer):
-    """
-    Trainable constrained quantizer to quantize layer activations.
-    """
-
-    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
-        """
-        Initialize a LSQUniformActivationQATQuantizer object with parameters to use
-        for uniform quantization.
-
-        Args:
-            quantization_config: trainable quantizer config class
-        """
-        super().__init__(quantization_config)
-        self.num_bits = self.quantization_config.activation_n_bits
-        self.min_int = 0
-        self.max_int = 2 ** self.num_bits - 1
-        self.min_range = np.array([quantization_config.activation_quantization_params[C.RANGE_MIN]])
-        self.max_range = np.array([quantization_config.activation_quantization_params[C.RANGE_MAX]])
-
-    def initialize_quantization(self,
-                                tensor_shape: torch.Size,
-                                name: str,
-                                layer: PytorchQuantizationWrapper):
-        """
-        Add quantizer parameters to the quantizer parameters dictionary
-
-        Args:
-            tensor_shape: tensor shape of the quantized tensor.
-            name: Tensor name.
-            layer: Layer to quantize.
-        """
-        layer.register_parameter(name+"_"+FQ_MIN, nn.Parameter(to_torch_tensor(self.min_range), requires_grad=True))
-        layer.register_parameter(name+"_"+FQ_MAX, nn.Parameter(to_torch_tensor(self.max_range), requires_grad=True))
-
-        # Save the quantizer parameters for later calculations
-        self.add_quantizer_variable(FQ_MIN, layer.get_parameter(name+"_"+FQ_MIN), VariableGroup.QPARAMS)
-        self.add_quantizer_variable(FQ_MAX, layer.get_parameter(name+"_"+FQ_MAX), VariableGroup.QPARAMS)
-
-    def __call__(self,
-                 inputs: torch.Tensor,
-                 training: bool = True) -> torch.Tensor:
-        """
-        Quantize a tensor.
-        Args:
-            inputs: Input tensor to quantize.
-            training: Whether the graph is in training mode.
-
-        Returns:
-            The quantized tensor.
-        """
-        min_range = self.get_quantizer_variable(FQ_MIN)
-        max_range = self.get_quantizer_variable(FQ_MAX)
-        n_channels = inputs.shape[1]
-        scale_factor = 1.0 / np.sqrt(self.max_int * n_channels)
-        inputs_quantized = uniform_lsq_quantizer(inputs, min_range, max_range, self.num_bits, self.min_int, self.max_int, scale_factor)
-        return inputs_quantized
-
-    def convert2inferable(self) -> ActivationUniformInferableQuantizer:
-        """
-        Convert quantizer to inferable quantizer.
-
-        Returns:
-            A pytorch inferable quanizer object.
-        """
-        min_range = self.get_quantizer_variable(FQ_MIN).cpu().detach().numpy()
-        max_range = self.get_quantizer_variable(FQ_MAX).cpu().detach().numpy()
-        min_range, max_range = fix_range_to_include_zero(min_range, max_range, self.num_bits)
-        return ActivationUniformInferableQuantizer(num_bits=self.num_bits,
-                                                   min_range=min_range.tolist(),
-                                                   max_range=max_range.tolist())
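The final hunk deletes LSQUniformActivationQATQuantizer from the QAT module; together with the import changes above, activation-side LSQ training now comes from the trainable_infrastructure package instead of being duplicated here. Of the removed code, the convert2inferable step is the least obvious: the learned float range is detached to numpy, nudged so zero is representable, and handed to an inferable uniform quantizer. A small numpy sketch of that hand-off, using the same simplified zero-point assumption as above (not the toolkit's fix_range_to_include_zero, and the function name is hypothetical):

import numpy as np

def trained_range_to_inferable_kwargs(min_range: np.ndarray, max_range: np.ndarray, num_bits: int) -> dict:
    # Nudge the learned (min, max) so 0.0 falls exactly on the uniform grid, then return the
    # arguments an inferable uniform activation quantizer would be constructed from.
    scale = (max_range - min_range) / (2 ** num_bits - 1)
    zero_point = np.clip(np.round(-min_range / scale), 0, 2 ** num_bits - 1)
    adj_min = -zero_point * scale
    adj_max = adj_min + scale * (2 ** num_bits - 1)
    return dict(num_bits=num_bits, min_range=adj_min.tolist(), max_range=adj_max.tolist())

# Example: an 8-bit activation range learned as roughly (-0.07, 6.1).
print(trained_range_to_inferable_kwargs(np.array([-0.07]), np.array([6.1]), num_bits=8))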