mct-nightly 1.8.0.27022023.post430.tar.gz → 1.8.0.27032023.post403.tar.gz
This diff compares the contents of two publicly released versions of the package, as published to their public registry. The information is provided for informational purposes only and reflects the changes between the two package versions.
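To reproduce a comparison like the manifest below locally, both sdists can be fetched from PyPI (for example with `pip download mct-nightly==<version> --no-deps --no-binary :all:`), extracted, and diffed. The following is a minimal stdlib-only sketch, not part of the package itself; it assumes the two archives have already been extracted into directories named after their versions, and it reports only added, removed, and content-changed files rather than the per-file line counts shown in the manifest.

```python
# Sketch: compare two extracted sdist trees (directory names are assumptions).
from pathlib import Path
import hashlib

OLD = Path("mct-nightly-1.8.0.27022023.post430")
NEW = Path("mct-nightly-1.8.0.27032023.post403")

def digests(root: Path) -> dict:
    """Map each file path (relative to root) to a SHA-256 digest of its bytes."""
    return {
        p.relative_to(root): hashlib.sha256(p.read_bytes()).hexdigest()
        for p in root.rglob("*") if p.is_file()
    }

old, new = digests(OLD), digests(NEW)

for path in sorted(new.keys() - old.keys()):
    print(f"added:   {path}")
for path in sorted(old.keys() - new.keys()):
    print(f"removed: {path}")
for path in sorted(old.keys() & new.keys()):
    if old[path] != new[path]:
        print(f"changed: {path}")
```

Note that a plain content diff like this treats a moved file as one removal plus one addition; the registry manifest below additionally tracks renames, shown with the `{old-path → new-path}` notation.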
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/PKG-INFO +7 -7
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/README.md +6 -6
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/mct_nightly.egg-info/PKG-INFO +7 -7
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/mct_nightly.egg-info/SOURCES.txt +8 -2
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/__init__.py +9 -15
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/logger.py +10 -2
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +6 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantization_facade.py +1 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/constants.py +4 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +4 -10
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +16 -2
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_exporter → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter}/__init__.py +1 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_exporter/__init__.py +15 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py +1 -1
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/__init__.py +20 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +65 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +39 -24
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +82 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +61 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +24 -5
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +44 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/__init__.py +20 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/common/gptq_config.py +60 -106
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -7
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/common/gptq_training.py +28 -38
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/gptq_training.py +10 -28
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/graph_info.py +8 -33
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantization_facade.py +6 -12
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +2 -2
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +45 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +112 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +22 -128
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +11 -41
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/gptq_training.py +12 -4
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/graph_info.py +9 -6
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +9 -22
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +3 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -20
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +10 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +2 -2
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +45 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +115 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +236 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +196 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +9 -31
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +30 -37
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +27 -36
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +21 -21
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +25 -26
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py +1 -2
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +1 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantize_wrapper.py +12 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py +4 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py +1 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +12 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py +6 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py +3 -0
- mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/__init__.py +14 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +53 -2
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +2 -1
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +22 -4
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +24 -3
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/setup.cfg +1 -1
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -24
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -59
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -74
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -54
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -37
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq/common/gptq_quantizer_config.py +0 -93
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -14
- mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/qat/pytorch/__init__.py +0 -14
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/LICENSE.md +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/mct_nightly.egg-info/dependency_links.txt +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/mct_nightly.egg-info/requires.txt +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/mct_nightly.egg-info/top_level.txt +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/analyzer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/collectors/statistics_collector_generator.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/constants.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/data_loader.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/defaultdict.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/framework_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/edge.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/immutable.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/function.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/memory_computation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_aggregation_methods.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/model_collector.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/model_validation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_analyzer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/kmeans_params.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantizers/kmeans_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/current_tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/fusing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/op_quantization_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/operators.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/target_platform_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/target_platform_model_component.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/attribute_filter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/current_tpc.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/user_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/model_gradients.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/constants.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/kpi_data_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/input_layer_quantize_transform.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/mixed_precision/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/mixed_precision/quantization_config_factory.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/mixed_precision/selective_activation_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/mixed_precision/selective_quantize_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/quantizer/mixed_precision/selective_weights_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/common.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/model_gradients.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/kpi_data_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/mixed_precision/mixed_precision_wrapper.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/mixed_precision/set_layer_to_bitwidth.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/quantization_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/pytorch/utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/runner.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/latest/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v1/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v1/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v2/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v2/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v2/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v2/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3_lut/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3_lut/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3_lut/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v3_lut/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4_lut/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4_lut/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4_lut/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v4_lut/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v5/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v5/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v5/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/default_tpc/v5/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/get_target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/latest/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/v1/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/latest/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/v1/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/core/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_exporter/fw_agonstic}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_exporter/fw_agonstic → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_exporter/tflite}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/tflite/int8_tflite_exporter.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_exporter/tflite/tflite_export_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_exporter/tflite → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/keras}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/keras → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/keras/builder}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/keras → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/pytorch}/builder/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizers.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/exporter/model_wrapper/pytorch/builder → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/common}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/keras}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq/common → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/keras/quantizer/ste_rounding}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq/keras → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/gptq/runner.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq/keras/quantizer/ste_rounding → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/ptq}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/gptq/pytorch → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/ptq/keras}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/ptq → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/ptq/pytorch}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/ptq/runner.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/ptq/keras → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/qat}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/common/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/common/constants.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/common/qat_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/ptq/pytorch → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/qat/keras}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/qat → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/qat/keras/quantizer/ste_rounding}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/qat/keras → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/qat/pytorch}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/load_model.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/base_keras_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/validation_functions.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizer_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers}/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common → mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers}/activation_lut_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_pytorch_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizer_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/quant_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/config_serialization.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/quantizer_utils.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/__init__.py +0 -0
- {mct-nightly-1.8.0.27022023.post430 → mct-nightly-1.8.0.27032023.post403}/setup.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.8.0.27022023.post430
+Version: 1.8.0.27032023.post403
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -29,8 +29,8 @@ Description: # Model Compression Toolkit (MCT)
 ## Supported Features
 
 MCT supports different quantization methods:
-* Post
-* Gradient-based post
+* Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
+* Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
 * Quantization aware training (QAT)[*](#experimental-features)
 
 
@@ -87,15 +87,15 @@ Description: # Model Compression Toolkit (MCT)
 pip install mct-nightly
 ```
 
-###
+### Requirements
 
-To run MCT, one of the supported frameworks,
+To run MCT, one of the supported frameworks, Tensorflow/Pytorch, needs to be installed.
 
-For
+For use with Tensorflow please install the packages:
 [tensorflow](https://www.tensorflow.org/install),
 [tensorflow-model-optimization](https://www.tensorflow.org/model_optimization/guide/install)
 
-For
+For use with PyTorch please install the packages:
 [torch](https://pytorch.org/)
 
 Also, a [requirements](requirements.txt) file can be used to set up your environment.
@@ -23,8 +23,8 @@ MCT is developed by researchers and engineers working at Sony Semiconductor Isra
 ## Supported Features
 
 MCT supports different quantization methods:
-* Post
-* Gradient-based post
+* Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
+* Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
 * Quantization aware training (QAT)[*](#experimental-features)
 
 
@@ -81,15 +81,15 @@ A nightly package is also available (unstable):
 pip install mct-nightly
 ```
 
-###
+### Requirements
 
-To run MCT, one of the supported frameworks,
+To run MCT, one of the supported frameworks, Tensorflow/Pytorch, needs to be installed.
 
-For
+For use with Tensorflow please install the packages:
 [tensorflow](https://www.tensorflow.org/install),
 [tensorflow-model-optimization](https://www.tensorflow.org/model_optimization/guide/install)
 
-For
+For use with PyTorch please install the packages:
 [torch](https://pytorch.org/)
 
 Also, a [requirements](requirements.txt) file can be used to set up your environment.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.8.0.27022023.post430
+Version: 1.8.0.27032023.post403
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -29,8 +29,8 @@ Description: # Model Compression Toolkit (MCT)
 ## Supported Features
 
 MCT supports different quantization methods:
-* Post
-* Gradient-based post
+* Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
+* Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
 * Quantization aware training (QAT)[*](#experimental-features)
 
 
@@ -87,15 +87,15 @@ Description: # Model Compression Toolkit (MCT)
 pip install mct-nightly
 ```
 
-###
+### Requirements
 
-To run MCT, one of the supported frameworks,
+To run MCT, one of the supported frameworks, Tensorflow/Pytorch, needs to be installed.
 
-For
+For use with Tensorflow please install the packages:
 [tensorflow](https://www.tensorflow.org/install),
 [tensorflow-model-optimization](https://www.tensorflow.org/model_optimization/guide/install)
 
-For
+For use with PyTorch please install the packages:
 [torch](https://pytorch.org/)
 
 Also, a [requirements](requirements.txt) file can be used to set up your environment.
@@ -345,7 +345,6 @@ model_compression_toolkit/gptq/common/__init__.py
 model_compression_toolkit/gptq/common/gptq_config.py
 model_compression_toolkit/gptq/common/gptq_constants.py
 model_compression_toolkit/gptq/common/gptq_graph.py
-model_compression_toolkit/gptq/common/gptq_quantizer_config.py
 model_compression_toolkit/gptq/common/gptq_training.py
 model_compression_toolkit/gptq/keras/__init__.py
 model_compression_toolkit/gptq/keras/gptq_loss.py
@@ -356,7 +355,9 @@ model_compression_toolkit/gptq/keras/quantizer/__init__.py
 model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py
 model_compression_toolkit/gptq/keras/quantizer/quant_utils.py
 model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py
+model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py
+model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py
 model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py
 model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py
@@ -369,6 +370,11 @@ model_compression_toolkit/gptq/pytorch/quantizer/__init__.py
 model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
 model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py
 model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py
+model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py
+model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py
 model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py
 model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py
 model_compression_toolkit/ptq/__init__.py
@@ -402,7 +408,6 @@ model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py
 model_compression_toolkit/quantizers_infrastructure/__init__.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/__init__.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/__init__.py
-model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/activation_lut_pot_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/base_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/constants.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py
@@ -437,6 +442,7 @@ model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pyt
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/base_uniform_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/__init__.py
+model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
 model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
@@ -14,8 +14,6 @@
 # ==============================================================================
 
 from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
-from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfigV2
-from model_compression_toolkit.gptq.common.gptq_quantizer_config import GPTQQuantizerConfig, SoftQuantizerConfig
 from model_compression_toolkit.core.common.quantization import quantization_config
 from model_compression_toolkit.core.common.mixed_precision import mixed_precision_quantization_config
 from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, \
@@ -36,25 +34,21 @@ from model_compression_toolkit.core.common import network_editors as network_edi
 from model_compression_toolkit.core.keras.quantization_facade import keras_post_training_quantization, \
     keras_post_training_quantization_mixed_precision
 from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization_experimental
-from model_compression_toolkit.
-
-from model_compression_toolkit.
-from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, \
-    keras_quantization_aware_training_finalize
-from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, \
-    pytorch_quantization_aware_training_finalize
-from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, \
-    pytorch_post_training_quantization_mixed_precision
+from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, keras_quantization_aware_training_finalize
+from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, pytorch_quantization_aware_training_finalize
+from model_compression_toolkit.core.pytorch.quantization_facade import pytorch_post_training_quantization, pytorch_post_training_quantization_mixed_precision
 from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization_experimental
-from model_compression_toolkit.gptq.pytorch.quantization_facade import \
-    pytorch_gradient_post_training_quantization_experimental
-from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config
 
 from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data, keras_kpi_data_experimental
 from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data, pytorch_kpi_data_experimental
 
 from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model
 
-
+
+from model_compression_toolkit import exporter
+
+from model_compression_toolkit import gptq
+from model_compression_toolkit.gptq import GradientPTQConfig
+
 
 __version__ = "1.8.0"
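
The `model_compression_toolkit/__init__.py` hunk above consolidates the QAT/PTQ facade imports onto single lines and replaces the direct GPTQ imports with the `exporter` and `gptq` sub-packages, re-exporting `GradientPTQConfig` through `gptq`. A minimal sketch of the resulting import surface, assuming this nightly build is installed (illustrative only, not part of the package's documentation):

```python
# Minimal sketch, assuming mct-nightly 1.8.0.27032023.post403 is installed.
import model_compression_toolkit as mct

print(mct.__version__)  # "1.8.0"

# The gptq sub-package is imported eagerly by the top-level __init__, and
# GradientPTQConfig is re-exported at the top level, so both paths name the same class.
print(mct.GradientPTQConfig is mct.gptq.GradientPTQConfig)  # expected: True

# The exporter sub-package is likewise reachable directly from the top level.
print(mct.exporter.__name__)  # "model_compression_toolkit.exporter"
```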
@@ -17,7 +17,6 @@
 import logging
 import os
 from datetime import datetime
-from os import path
 from pathlib import Path
 
 LOGGER_NAME = 'Constrained Model Optimization'
@@ -43,7 +42,7 @@ class Logger:
 
         """
 
-        if not path.exists(log_path):
+        if not os.path.exists(log_path):
             Path(log_path).mkdir(parents=True, exist_ok=True)
 
     @staticmethod
@@ -93,6 +92,15 @@ class Logger:
 
         print(f'log file is in {log_name}')
 
+    @staticmethod
+    def shutdown():
+        """
+        An orderly command to shutdown by flushing and closing all logging handlers.
+
+        """
+        Logger.LOG_PATH = None
+        logging.shutdown()
+
     ########################################
     # Delegating methods to wrapped logger
     ########################################
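
The new `Logger.shutdown` helper above clears the stored log path and delegates to the standard library's `logging.shutdown()`, which flushes and closes every registered handler. A small stand-alone illustration of that standard-library behaviour (plain `logging`, not MCT's `Logger` wrapper):

```python
import logging
import os
import tempfile

# Illustration of logging.shutdown() only; MCT's Logger adds log-path handling on top of this.
log_file = os.path.join(tempfile.mkdtemp(), "run.log")
logging.basicConfig(filename=log_file, level=logging.INFO)
logging.getLogger("demo").info("quantization finished")

# Flush and close all handlers registered with the logging module.
logging.shutdown()

with open(log_file) as f:
    print(f.read())  # INFO:demo:quantization finished
```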
@@ -22,6 +22,8 @@ from model_compression_toolkit.core.common import Logger
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI, KPITarget
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_manager import MixedPrecisionSearchManager
 
+# Limit ILP solver runtime in seconds
+SOLVER_TIME_LIMIT = 60
 
 def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
                                   target_kpi: KPI = None) -> List[int]:
@@ -64,7 +66,10 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
                                     target_kpi,
                                     search_manager)
 
-
+    # Use default PULP solver. Limit runtime in seconds
+    solver = PULP_CBC_CMD(timeLimit=SOLVER_TIME_LIMIT)
+    lp_problem.solve(solver=solver)  # Try to solve the problem.
+
     assert lp_problem.status == LpStatusOptimal, Logger.critical(
         "No solution was found during solving the LP problem")
     Logger.info(LpStatus[lp_problem.status])
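
The linear-programming hunk above bounds the mixed-precision ILP solve by constructing a CBC solver with an explicit time limit instead of relying on `solve()`'s defaults. Below is a self-contained sketch of the same PuLP pattern on a toy problem (`PULP_CBC_CMD` takes its `timeLimit` in seconds; the problem and variable names are illustrative, not MCT's):

```python
from pulp import (LpMaximize, LpProblem, LpStatus, LpStatusOptimal,
                  LpVariable, PULP_CBC_CMD)

SOLVER_TIME_LIMIT = 60  # seconds, mirroring the constant added in the hunk above

# Toy LP: maximize x + 2*y subject to simple bounds and one coupling constraint.
problem = LpProblem("toy_mixed_precision_search", LpMaximize)
x = LpVariable("x", lowBound=0, upBound=4)
y = LpVariable("y", lowBound=0, upBound=3)
problem += x + 2 * y          # objective
problem += x + y <= 5         # constraint

# Construct the CBC solver explicitly so its runtime can be capped.
solver = PULP_CBC_CMD(timeLimit=SOLVER_TIME_LIMIT, msg=False)
problem.solve(solver=solver)

assert problem.status == LpStatusOptimal, "No solution was found within the time limit"
print(LpStatus[problem.status], x.value(), y.value())  # Optimal 2.0 3.0
```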
@@ -19,7 +19,7 @@ from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common import Logger
 from model_compression_toolkit.core.common.constants import TENSORFLOW
 from model_compression_toolkit.core.common.user_info import UserInformation
-from model_compression_toolkit.gptq
+from model_compression_toolkit.gptq import GradientPTQConfig, GradientPTQConfigV2
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.core.common.network_editors.actions import EditRule
@@ -92,3 +92,7 @@ IN_PROJ_WEIGHT = 'in_proj_weight'
 IN_PROJ_BIAS = 'in_proj_bias'
 BIAS_K = 'bias_k'
 BIAS_V = 'bias_v'
+
+# # Batch size value for 'reshape' and 'view' operators,
+# # the value is -1 so the batch size is inferred from the length of the array and remaining dimensions.
+BATCH_DIM_VALUE = -1
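
`BATCH_DIM_VALUE = -1` leans on the standard PyTorch convention that a single `-1` entry in a `reshape`/`view` target shape is inferred from the remaining dimensions, so the static shapes substituted into the graph keep working for any batch size. A short illustration of that convention (plain PyTorch, independent of MCT):

```python
import torch

BATCH_DIM_VALUE = -1  # same convention as the constant added above

x_small = torch.randn(2, 3, 4)
x_large = torch.randn(16, 3, 4)

# With -1 in the batch position, one static target shape serves every batch size:
# the batch dimension is inferred from the tensor's total number of elements.
target_shape = (BATCH_DIM_VALUE, 12)
print(torch.reshape(x_small, target_shape).shape)  # torch.Size([2, 12])
print(torch.reshape(x_large, target_shape).shape)  # torch.Size([16, 12])

# A hard-coded batch dimension would break as soon as the batch size changes.
try:
    torch.reshape(x_large, (2, 12))
except RuntimeError as error:
    print("static batch failed:", error)
```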
@@ -58,21 +58,15 @@ class MHAParams:
 
         # Check if Add Bias KV feature is Active
         if BIAS_K and BIAS_V in mha_node.weights.keys():
-            if mha_node.weights[BIAS_K] and mha_node.weights[BIAS_V] is not None:
+            if mha_node.weights[BIAS_K] is not None and mha_node.weights[BIAS_V] is not None:
                 Logger.error('Add BIAS_KV feature is Not Implemented')  # pragma: no cover
 
         self.embed_dim = mha_node.framework_attr[EMBED_DIM]
         self.num_heads = mha_node.framework_attr[NUM_HEADS]
 
-
-            self.kdim = mha_node.framework_attr[KEY_DIM]
-        else:
-            self.kdim = False
+        self.kdim = mha_node.framework_attr[KEY_DIM]
 
-
-            self.vdim = mha_node.framework_attr[VALUE_DIM]
-        else:
-            self.vdim = False
+        self.vdim = mha_node.framework_attr[VALUE_DIM]
 
         self.qdim = int(self.embed_dim / self.num_heads)
 
@@ -708,7 +702,7 @@ class MultiHeadAttentionDecomposition(common.BaseSubstitution):
         """
 
         if mha_node.reuse:
-            raise Exception("MCT doesn't support reuse of MultiHeadAttention layer")
+            raise Exception("MCT doesn't support reuse of MultiHeadAttention layer")  # pragma: no cover
         params = MHAParams(mha_node)
 
         # project
@@ -14,10 +14,13 @@
 # ==============================================================================
 from torch import reshape
 import torch
+
+from model_compression_toolkit.core.common import Logger
 from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
+from model_compression_toolkit.core.pytorch.constants import BATCH_DIM_VALUE
 
 
 class ReshapeWithStaticShapes(common.BaseSubstitution):
@@ -47,14 +50,25 @@ class ReshapeWithStaticShapes(common.BaseSubstitution):
         Returns:
             Graph after applying the substitution.
         """
+        # we want the batch size value to infer from the length of the array and remaining dimensions
+        if len(node.output_shape) == 1:
+            node.output_shape[0][0] = BATCH_DIM_VALUE
+        else:
+            Logger.error('Reshape or view nodes should have a single output shape')  # pragma: no cover
+
         # configure the new static output shape attribute
         node.op_call_args = node.output_shape
 
         # modify the node input info
         node.input_shape = [node.input_shape[0]]
+
+        # the first input is the tensor to be reshaped, we want his batch size value to infer
+        # from the length of the array and remaining dimensions
+        node.input_shape[0][0] = BATCH_DIM_VALUE
+
         nodes_to_check = []
         for in_edge in graph.incoming_edges(node):
-            if in_edge.sink_index > 0:
+            if in_edge.sink_index > 0:  # the first input is the tensor to be reshaped
                 nodes_to_check.append(in_edge.source_node)
                 graph.remove_edge(in_edge.source_node, node)
         for n in nodes_to_check:
@@ -80,4 +94,4 @@ def clean_graph_from_nodes_without_out_edges(graph: Graph,
             graph.remove_edge(in_edge.source_node, node)
             graph.remove_node(node)
     for n in nodes_to_check:
-        clean_graph_from_nodes_without_out_edges(graph, n)
+        clean_graph_from_nodes_without_out_edges(graph, n)
@@ -16,3 +16,4 @@
 from model_compression_toolkit.exporter.model_exporter.keras.keras_export_facade import keras_export_model, KerasExportMode
 from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import PyTorchExportMode, pytorch_export_model
 from model_compression_toolkit.exporter.model_exporter.tflite.tflite_export_facade import tflite_export_model, TFLiteExportMode
+
mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_exporter/__init__.py
ADDED
@@ -0,0 +1,15 @@
+# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
@@ -19,7 +19,7 @@ from typing import Callable
 import keras.models
 import tensorflow as tf
 
-from model_compression_toolkit import keras_load_quantized_model
+from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.load_model import keras_load_quantized_model
 from model_compression_toolkit.core.common import Logger
 from model_compression_toolkit.exporter.model_exporter.keras.fakely_quant_keras_exporter import FakelyQuantKerasExporter
mct-nightly-1.8.0.27032023.post403/model_compression_toolkit/exporter/model_wrapper/__init__.py
ADDED
@@ -0,0 +1,20 @@
+# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from model_compression_toolkit.exporter.model_wrapper.keras.validate_layer import is_keras_layer_exportable
+from model_compression_toolkit.exporter.model_wrapper.keras.builder.fully_quantized_model_builder import get_exportable_keras_model
+
+from model_compression_toolkit.exporter.model_wrapper.pytorch.validate_layer import is_pytorch_layer_exportable
+from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
@@ -0,0 +1,65 @@
+# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from typing import Tuple
+
+
+from model_compression_toolkit import quantizers_infrastructure as qi
+from model_compression_toolkit.core import common
+from model_compression_toolkit.core.common import Graph, Logger
+from model_compression_toolkit.core.common.constants import FOUND_TF
+from model_compression_toolkit.core.common.user_info import UserInformation
+
+if FOUND_TF:
+    import tensorflow as tf
+    from tensorflow.keras.layers import Layer
+    from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
+    from model_compression_toolkit.exporter.model_wrapper.keras.builder.node_to_quantizers import get_quantization_quantizers
+
+    def _get_wrapper(node: common.BaseNode,
+                     layer: Layer) -> qi.KerasQuantizationWrapper:
+        """
+        A function which takes a computational graph node and a keras layer and perform the quantization wrapping
+        Args:
+            n: A node of mct graph.
+            layer: A keras layer
+
+        Returns: Wrapped layer with weights quantizers and activation quantizers
+
+        """
+        weights_quantizers, activation_quantizers = get_quantization_quantizers(node)
+        return qi.KerasQuantizationWrapper(layer, weights_quantizers, activation_quantizers)
+
+
+    def get_exportable_keras_model(graph: Graph) -> Tuple[tf.keras.models.Model, UserInformation]:
+        """
+        Convert graph to an exportable Keras model (model with all quantization parameters).
+        An exportable model can then be exported using model_exporter, to retrieve the
+        final exported model.
+
+        Args:
+            graph: Graph to convert to an exportable Keras model.
+
+        Returns:
+            Exportable Keras model and user information.
+        """
+        exportable_model, user_info = KerasModelBuilder(graph=graph,
+                                                        wrapper=_get_wrapper).build_model()
+        exportable_model.trainable = False
+        return exportable_model, user_info
+else:
+    def get_exportable_keras_model(*args, **kwargs):  # pragma: no cover
+        Logger.error('Installing tensorflow and tensorflow_model_optimization is mandatory '
+                     'when using get_exportable_keras_model. '
+                     'Could not find Tensorflow package.')
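
The added builder above passes a wrapper callable into `KerasModelBuilder` so that every rebuilt layer comes back wrapped together with its weight and activation quantizers. The toy sketch below shows only the wrapping idea in plain TensorFlow; `FakeQuantWrapper` is a hypothetical stand-in, not MCT's `KerasQuantizationWrapper`, and it fake-quantizes a Dense layer's kernel on the forward pass:

```python
import tensorflow as tf


class FakeQuantWrapper(tf.keras.layers.Layer):
    """Hypothetical toy wrapper: fake-quantize the wrapped Dense layer's kernel at call time."""

    def __init__(self, dense_layer: tf.keras.layers.Dense, num_bits: int = 8, **kwargs):
        super().__init__(**kwargs)
        self.dense_layer = dense_layer
        self.num_bits = num_bits

    def call(self, inputs):
        # Quantize-dequantize the kernel only in the forward pass; the stored weights stay float.
        quant_kernel = tf.quantization.fake_quant_with_min_max_args(
            self.dense_layer.kernel, min=-1.0, max=1.0, num_bits=self.num_bits)
        outputs = tf.matmul(inputs, quant_kernel) + self.dense_layer.bias
        return self.dense_layer.activation(outputs)


dense = tf.keras.layers.Dense(4, activation="relu")
dense.build((None, 8))  # create kernel/bias so the wrapper can reach them
wrapped = FakeQuantWrapper(dense)
print(wrapped(tf.random.normal((2, 8))).shape)  # (2, 4)
```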
@@ -15,15 +15,12 @@
 from typing import Dict, Any
 
 from model_compression_toolkit.core.common import BaseNode, Logger
-from model_compression_toolkit.core.common.constants import THRESHOLD, RANGE_MIN, RANGE_MAX, SIGNED
+from model_compression_toolkit.core.common.constants import THRESHOLD, RANGE_MIN, RANGE_MAX, SIGNED, CLUSTER_CENTERS, SCALE_PER_CHANNEL
 from model_compression_toolkit.core.common.target_platform import QuantizationMethod
 from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import QuantizationTarget
-from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.get_quantizers import
-
-from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.quantizers
-import \
-    BaseKerasInferableQuantizer
-
+from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.get_quantizers import get_inferable_quantizer_class
+from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.quantizers.base_keras_inferable_quantizer import BaseKerasInferableQuantizer
+from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.keras.quantizers import constants as qi_keras_consts
 
 def get_inferable_quantizer_kwargs(node: BaseNode,
                                    quantization_target: QuantizationTarget) -> Dict[str, Any]:
@@ -44,19 +41,29 @@ def get_inferable_quantizer_kwargs(node: BaseNode,
         # Return the appropriate quantization parameters based on the quantization method
         if quantization_method in [QuantizationMethod.POWER_OF_TWO,
                                    QuantizationMethod.SYMMETRIC]:
-            return {
-
-
-
+            return {qi_keras_consts.NUM_BITS: node_w_qc.weights_n_bits,
+                    qi_keras_consts.THRESHOLD: list(node_w_qc.weights_quantization_params[THRESHOLD].flatten()),
+                    qi_keras_consts.PER_CHANNEL: node_w_qc.weights_per_channel_threshold,
+                    qi_keras_consts.CHANNEL_AXIS: node_w_qc.weights_channels_axis,
+                    qi_keras_consts.INPUT_RANK: len(node_w_qc.weights_quantization_params[THRESHOLD].shape)}
 
         elif quantization_method in [QuantizationMethod.UNIFORM]:
-            return {
-
-
-
-
-
+            return {qi_keras_consts.NUM_BITS: node_w_qc.weights_n_bits,
+                    qi_keras_consts.PER_CHANNEL: node_w_qc.weights_per_channel_threshold,
+                    qi_keras_consts.MIN_RANGE: list(node_w_qc.weights_quantization_params[RANGE_MIN].flatten()),
+                    qi_keras_consts.MAX_RANGE: list(node_w_qc.weights_quantization_params[RANGE_MAX].flatten()),
+                    qi_keras_consts.CHANNEL_AXIS: node_w_qc.weights_channels_axis,
+                    qi_keras_consts.INPUT_RANK: len(node_w_qc.weights_quantization_params[RANGE_MIN].shape)}
+
+        elif quantization_method in [QuantizationMethod.LUT_SYM_QUANTIZER, QuantizationMethod.LUT_POT_QUANTIZER]:
+            return {qi_keras_consts.NUM_BITS: node_w_qc.weights_n_bits,
+                    qi_keras_consts.PER_CHANNEL: node_w_qc.weights_per_channel_threshold,
+                    qi_keras_consts.CLUSTER_CENTERS: node_w_qc.weights_quantization_params[CLUSTER_CENTERS],
+                    qi_keras_consts.THRESHOLD: list(node_w_qc.weights_quantization_params[SCALE_PER_CHANNEL].flatten()),
+                    qi_keras_consts.CHANNEL_AXIS: node_w_qc.weights_channels_axis,
+                    # TODO: how to pass multiplier nbits and eps for a specific node?
+                    qi_keras_consts.INPUT_RANK: len(node_w_qc.weights_quantization_params[SCALE_PER_CHANNEL].shape)}
+
         else:
             Logger.critical(f'Not supported quantization method for inferable quantizers.')  # pragma: no cover
 
@@ -68,16 +75,24 @@ def get_inferable_quantizer_kwargs(node: BaseNode,
         # Return the appropriate quantization parameters based on the quantization method
        if quantization_method in [QuantizationMethod.POWER_OF_TWO,
                                   QuantizationMethod.SYMMETRIC]:
-            return {
+            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits,
                     # In activation quantization is per-tensor only - thus we hold the threshold as a list with a len of 1
-
-
+                    qi_keras_consts.THRESHOLD: [node_qc.activation_quantization_params[THRESHOLD]],
+                    qi_keras_consts.SIGNED: node_qc.activation_quantization_params[SIGNED]}
 
        elif quantization_method in [QuantizationMethod.UNIFORM]:
-            return {
+            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits,
                     # In activation quantization is per-tensor only - thus we hold the min/max as a list with a len of 1
-
-
+                    qi_keras_consts.MIN_RANGE: [node_qc.activation_quantization_params[RANGE_MIN]],
+                    qi_keras_consts.MAX_RANGE: [node_qc.activation_quantization_params[RANGE_MAX]]}
+
+        elif quantization_method in [QuantizationMethod.LUT_POT_QUANTIZER]:
+            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits,
+                    qi_keras_consts.SIGNED: node_qc.activation_quantization_params[SIGNED],
+                    qi_keras_consts.CLUSTER_CENTERS: node_qc.activation_quantization_params[CLUSTER_CENTERS],
+                    qi_keras_consts.THRESHOLD: [node_qc.activation_quantization_params[THRESHOLD]]
+                    # TODO: how to pass multiplier nbits and eps for a specific node?
+                    }
        else:
            Logger.critical(f'Not supported quantization method for inferable quantizers.')  # pragma: no cover
    else: