mct-nightly 1.11.0.20240317.91316__tar.gz → 1.11.0.20240319.407__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (484)
  1. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/PKG-INFO +1 -1
  2. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/mct_nightly.egg-info/PKG-INFO +1 -1
  3. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/__init__.py +1 -1
  4. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/__init__.py +2 -0
  5. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +3 -1
  6. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +13 -9
  7. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +2 -1
  8. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/pruning_config.py +8 -2
  9. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/pruning_info.py +3 -10
  10. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/core_config.py +8 -3
  11. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +1 -1
  12. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/graph_prep_runner.py +3 -2
  13. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/kpi_data_facade.py +1 -1
  14. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/runner.py +10 -2
  15. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/__init__.py +3 -0
  16. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/enums.py +56 -27
  17. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +29 -0
  18. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +29 -2
  19. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +10 -0
  20. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +5 -9
  21. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/gptq_config.py +6 -3
  22. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantization_facade.py +9 -7
  23. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +3 -0
  24. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/pruning/keras/pruning_facade.py +5 -4
  25. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/keras/quantization_facade.py +3 -0
  26. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +3 -0
  27. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantization_facade.py +4 -1
  28. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantization_facade.py +3 -0
  29. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +3 -3
  30. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/LICENSE.md +0 -0
  31. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/README.md +0 -0
  32. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/mct_nightly.egg-info/SOURCES.txt +0 -0
  33. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/mct_nightly.egg-info/dependency_links.txt +0 -0
  34. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/mct_nightly.egg-info/requires.txt +0 -0
  35. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/mct_nightly.egg-info/top_level.txt +0 -0
  36. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/constants.py +0 -0
  37. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/analyzer.py +0 -0
  38. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/__init__.py +0 -0
  39. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  40. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  41. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  42. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  43. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  44. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  45. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  46. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  47. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  48. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/data_loader.py +0 -0
  49. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  50. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/framework_info.py +0 -0
  51. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  52. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  53. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  54. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  55. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  56. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  57. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  58. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  59. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  60. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  61. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  62. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  63. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  64. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  65. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  66. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  67. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  68. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  69. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  70. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  71. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
  72. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
  73. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  74. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  75. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  76. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  77. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  78. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  79. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  80. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  81. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  82. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  83. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  84. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  85. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  86. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/__init__.py +0 -0
  87. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi.py +0 -0
  88. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_aggregation_methods.py +0 -0
  89. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py +0 -0
  90. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +0 -0
  91. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  92. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  93. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  94. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  95. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  96. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  97. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  98. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/model_collector.py +0 -0
  99. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/model_validation.py +0 -0
  100. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  101. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  102. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  103. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  104. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  105. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  106. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  107. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  108. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  109. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  110. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  111. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  112. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  113. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  114. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  115. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  116. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  117. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  118. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  119. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  120. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  121. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  122. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  123. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  124. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  125. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  126. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  127. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  128. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  129. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  130. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  131. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  132. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  133. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  134. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  135. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  136. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  137. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  138. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  139. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  140. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  141. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  142. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  143. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  144. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  145. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  146. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  147. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  148. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  149. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  150. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  151. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  152. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  153. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  154. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  155. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  156. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  157. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  158. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  159. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  160. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  161. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  162. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  163. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  164. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/user_info.py +0 -0
  165. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  166. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  167. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  168. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  169. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/exporter.py +0 -0
  170. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/__init__.py +0 -0
  171. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  172. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  173. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  174. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  175. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  176. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  177. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  178. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/constants.py +0 -0
  179. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  180. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  181. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  182. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  183. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  184. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  185. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  186. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  187. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  188. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  189. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  190. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  191. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  192. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  193. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +0 -0
  194. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  195. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  196. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  197. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  198. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  199. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  200. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  201. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  202. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
  203. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +0 -0
  204. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
  205. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  206. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  207. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  208. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/kpi_data_facade.py +0 -0
  209. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  210. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  211. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  212. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  213. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  214. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  215. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  216. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  217. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  218. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  219. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  220. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  221. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  222. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  223. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  224. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  225. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  226. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  227. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  228. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  229. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  230. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  231. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  232. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  233. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  234. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  235. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  236. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  237. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  238. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  239. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  240. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  241. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  242. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  243. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  244. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  245. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  246. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  247. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  248. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  249. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  250. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  251. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  252. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  253. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  254. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  255. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
  256. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  257. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  258. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  259. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  260. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  261. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  262. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  263. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  264. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  265. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
  266. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +0 -0
  267. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
  268. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  269. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  270. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  271. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  272. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  273. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  274. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  275. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  276. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  277. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  278. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  279. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  280. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  281. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  282. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  283. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  284. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  285. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  286. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  287. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  288. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  289. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  290. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  291. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  292. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  293. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  294. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  295. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  296. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  297. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  298. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  299. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  300. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  301. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  302. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  303. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  304. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  305. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  306. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  307. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  308. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  309. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  310. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  311. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  312. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  313. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  314. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  315. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  316. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/defaultdict.py +0 -0
  317. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/__init__.py +0 -0
  318. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  319. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  320. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  321. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  322. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  323. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  324. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  325. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  326. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  327. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  328. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  329. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  330. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  331. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  332. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  333. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  334. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  335. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  336. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  337. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  338. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  339. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  340. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  341. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  342. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  343. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  344. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  345. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  346. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  347. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/__init__.py +0 -0
  348. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  349. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  350. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  351. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  352. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  353. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  354. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  355. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  356. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  357. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  358. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  359. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  360. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  361. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  362. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  363. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  364. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  365. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  366. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  367. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  368. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  369. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  370. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  371. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  372. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
  373. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  374. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  375. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  376. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  377. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  378. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
  379. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  380. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  381. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  382. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  383. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  384. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  385. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/gptq/runner.py +0 -0
  386. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/logger.py +0 -0
  387. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/pruning/__init__.py +0 -0
  388. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  389. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  390. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  391. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/__init__.py +0 -0
  392. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  393. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  394. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/ptq/runner.py +0 -0
  395. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/__init__.py +0 -0
  396. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/common/__init__.py +0 -0
  397. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  398. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  399. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  400. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  401. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  402. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  403. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  404. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  405. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  406. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  407. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  408. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  409. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  410. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  411. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
  412. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  413. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  414. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  415. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  416. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
  417. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  418. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  419. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  420. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  421. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  422. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  423. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  424. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  425. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  426. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  427. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  428. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  429. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  430. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  431. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  432. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  433. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  434. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  435. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  436. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  437. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  438. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  439. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  440. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  441. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  442. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  443. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  444. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  445. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  446. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  447. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  448. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  449. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  450. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  451. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  452. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  453. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  454. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  455. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  456. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  457. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  458. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  459. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  460. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  461. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  462. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  463. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  464. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  465. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  466. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  467. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  468. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  469. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  470. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  471. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  472. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  473. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  474. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  475. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  476. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  477. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  478. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  479. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  480. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  481. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  482. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  483. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/setup.cfg +0 -0
  484. {mct-nightly-1.11.0.20240317.91316 → mct-nightly-1.11.0.20240319.407}/setup.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.11.0.20240317.91316
+Version: 1.11.0.20240319.407
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.11.0.20240317.91316
+Version: 1.11.0.20240319.407
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "1.11.0.20240317.091316"
+__version__ = "1.11.0.20240319.000407"
@@ -25,3 +25,5 @@ from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
 from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data
 from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data
+from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
+
@@ -47,13 +47,15 @@ def compute_kpi_data(in_model: Any,
 
     """
 
+    # We assume that the kpi_data API is used to compute the model KPI for mixed precision scenario,
+    # so we run graph preparation under the assumption of enabled mixed precision.
     transformed_graph = graph_preparation_runner(in_model,
                                                  representative_data_gen,
                                                  core_config.quantization_config,
                                                  fw_info,
                                                  fw_impl,
                                                  tpc,
-                                                 mixed_precision_enable=core_config.mixed_precision_enable)
+                                                 mixed_precision_enable=True)
 
     # Compute parameters sum
     weights_params = compute_nodes_weights_params(graph=transformed_graph, fw_info=fw_info)
@@ -16,13 +16,11 @@
 from typing import List, Callable
 
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
-from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 
 
 class MixedPrecisionQuantizationConfig:
 
     def __init__(self,
-                 target_kpi: KPI = None,
                  compute_distance_fn: Callable = None,
                  distance_weighting_method: MpDistanceWeighting = MpDistanceWeighting.AVG,
                  num_of_images: int = 32,
@@ -36,7 +34,6 @@ class MixedPrecisionQuantizationConfig:
         Class with mixed precision parameters to quantize the input model.
 
         Args:
-            target_kpi (KPI): KPI to constraint the search of the mixed-precision configuration for the model.
             compute_distance_fn (Callable): Function to compute a distance between two tensors. If None, using pre-defined distance methods based on the layer type for each layer.
             distance_weighting_method (MpDistanceWeighting): MpDistanceWeighting enum value that provides a function to use when weighting the distances among different layers when computing the sensitivity metric.
             num_of_images (int): Number of images to use to evaluate the sensitivity of a mixed-precision model comparing to the float model.
@@ -49,7 +46,6 @@ class MixedPrecisionQuantizationConfig:
 
         """
 
-        self.target_kpi = target_kpi
         self.compute_distance_fn = compute_distance_fn
         self.distance_weighting_method = distance_weighting_method
         self.num_of_images = num_of_images
@@ -67,13 +63,21 @@ class MixedPrecisionQuantizationConfig:
 
         self.metric_normalization_threshold = metric_normalization_threshold
 
-    def set_target_kpi(self, target_kpi: KPI):
+        self._mixed_precision_enable = False
+
+    def set_mixed_precision_enable(self):
+        """
+        Set a flag in mixed precision config indicating that mixed precision is enabled.
         """
-        Setting target KPI in mixed precision config.
 
-        Args:
-            target_kpi: A target KPI to set.
+        self._mixed_precision_enable = True
 
+    @property
+    def mixed_precision_enable(self):
         """
+        A property that indicates whether mixed precision quantization is enabled.
 
-        self.target_kpi = target_kpi
+        Returns: True if mixed precision quantization is enabled
+
+        """
+        return self._mixed_precision_enable
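Taken together, the hunks above drop `target_kpi` from `MixedPrecisionQuantizationConfig` and replace `set_target_kpi` with an internal enable flag that is set when a KPI budget is supplied. A minimal sketch of the resulting configuration flow, assuming the `mct.core` aliases used in the docstring examples later in this diff:

    import model_compression_toolkit as mct

    # Mixed-precision options no longer carry a KPI; the budget is handed to the
    # quantization facade instead (see the facade and runner hunks below).
    mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=32)
    core_config = mct.core.CoreConfig(mixed_precision_config=mp_config)

    # The KPI budget is created separately, as in the docstring example below.
    target_kpi = mct.core.KPI(10_000_000)  # illustrative weights-memory budget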
@@ -47,6 +47,7 @@ search_methods = {
 def search_bit_width(graph_to_search_cfg: Graph,
                      fw_info: FrameworkInfo,
                      fw_impl: FrameworkImplementation,
+                     target_kpi: KPI,
                      mp_config: MixedPrecisionQuantizationConfig,
                      representative_data_gen: Callable,
                      search_method: BitWidthSearchMethod = BitWidthSearchMethod.INTEGER_PROGRAMMING,
@@ -63,6 +64,7 @@ def search_bit_width(graph_to_search_cfg: Graph,
         graph_to_search_cfg: Graph to search a MP configuration for.
         fw_info: FrameworkInfo object about the specific framework (e.g., attributes of different layers' weights to quantize).
         fw_impl: FrameworkImplementation object with specific framework methods implementation.
+        target_kpi: Target KPI to bound our feasible solution space s.t the configuration does not violate it.
         mp_config: Mixed-precision quantization configuration.
         representative_data_gen: Dataset to use for retrieving images for the models inputs.
         search_method: BitWidthSearchMethod to define which searching method to use.
@@ -74,7 +76,6 @@ def search_bit_width(graph_to_search_cfg: Graph,
         bit-width index on the node).

     """
-    target_kpi = mp_config.target_kpi

    # target_kpi have to be passed. If it was not passed, the facade is not supposed to get here by now.
    if target_kpi is None:
@@ -20,14 +20,20 @@ from model_compression_toolkit.constants import PRUNING_NUM_SCORE_APPROXIMATIONS
 
 class ImportanceMetric(Enum):
     """
-    Enum for specifying the metric used to determine the importance of channels when pruning.
+    Enum for specifying the metric used to determine the importance of channels when pruning:
+
+    LFH - Label-Free Hessian uses hessian info for measuring each channel's sensitivity.
+
     """
     LFH = 0 # Score based on the Hessian matrix w.r.t. layers weights, to determine channel importance without labels.
 
 
 class ChannelsFilteringStrategy(Enum):
     """
-    Enum for specifying the strategy used for filtering (pruning) channels.
+    Enum for specifying the strategy used for filtering (pruning) channels:
+
+    GREEDY - Prune the least important channel groups up to allowed resources in the KPI (for now, only weights_memory is considered).
+
     """
     GREEDY = 0 # Greedy strategy for pruning channels based on importance metrics.
 
@@ -26,23 +26,16 @@ class PruningInfo:
     and importance scores for each layer. This class acts as a container for accessing
     pruning-related metadata.
 
-    Attributes:
-        pruning_masks (Dict[BaseNode, np.ndarray]): Stores the pruning masks for each layer.
-        A pruning mask is an array where each element indicates whether the corresponding
-        channel or neuron has been pruned (0) or kept (1).
-        importance_scores (Dict[BaseNode, np.ndarray]): Stores the importance scores for each layer.
-        Importance scores quantify the significance of each channel in the layer.
     """
 
     def __init__(self,
                  pruning_masks: Dict[BaseNode, np.ndarray],
                  importance_scores: Dict[BaseNode, np.ndarray]):
         """
-        Initializes the PruningInfo with pruning masks and importance scores.
-
         Args:
-            pruning_masks (Dict[BaseNode, np.ndarray]): Pruning masks for each layer.
-            importance_scores (Dict[BaseNode, np.ndarray]): Importance scores for each layer.
+            pruning_masks (Dict[BaseNode, np.ndarray]): Stores the pruning masks for each layer. A pruning mask is an array where each element indicates whether the corresponding channel or neuron has been pruned (0) or kept (1).
+            importance_scores (Dict[BaseNode, np.ndarray]): Stores the importance scores for each layer. Importance scores quantify the significance of each channel in the layer.
+
         """
         self._pruning_masks = pruning_masks
         self._importance_scores = importance_scores
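As a rough end-to-end illustration of how these pruning pieces fit together, the enums above select the scoring and filtering behavior, and the returned `PruningInfo` carries the per-layer masks and scores. The facade name, its arguments, and the `pruning_masks` property are assumptions not shown in this diff, so treat this as a sketch only:

    import numpy as np
    import model_compression_toolkit as mct
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D, Flatten, Dense
    from model_compression_toolkit.core.common.pruning.pruning_config import (
        ImportanceMetric, ChannelsFilteringStrategy)

    model = Sequential([Conv2D(8, 3, input_shape=(16, 16, 3)), Flatten(), Dense(10)])

    def repr_datagen():  # representative data generator yielding a list of inputs
        yield [np.random.randn(1, 16, 16, 3)]

    # Hypothetical setup: LFH importance scores with greedy channel filtering.
    pruning_config = mct.pruning.PruningConfig(
        importance_metric=ImportanceMetric.LFH,
        channels_filtering_strategy=ChannelsFilteringStrategy.GREEDY)

    pruned_model, pruning_info = mct.pruning.keras_pruning_experimental(
        model=model,
        target_kpi=mct.core.KPI(weights_memory=model.count_params() * 2.0),  # ~50% of float32 size
        representative_data_gen=repr_datagen,
        pruning_config=pruning_config)

    # Assuming the private dicts above are exposed as same-named properties:
    for node, mask in pruning_info.pruning_masks.items():
        print(node, int(mask.sum()), "of", mask.size, "channels kept")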
@@ -30,14 +30,19 @@ class CoreConfig:
 
         Args:
             quantization_config (QuantizationConfig): Config for quantization.
-            mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization (optional, default=None).
+            mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization.
+            If None, a default MixedPrecisionQuantizationConfig is used.
             debug_config (DebugConfig): Config for debugging and editing the network quantization process.
         """
         self.quantization_config = quantization_config
-        self.mixed_precision_config = mixed_precision_config
         self.debug_config = debug_config
 
+        if mixed_precision_config is None:
+            self.mixed_precision_config = MixedPrecisionQuantizationConfig()
+        else:
+            self.mixed_precision_config = mixed_precision_config
+
     @property
     def mixed_precision_enable(self):
-        return self.mixed_precision_config is not None
+        return self.mixed_precision_config is not None and self.mixed_precision_config.mixed_precision_enable
 
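The net effect of this hunk is that `CoreConfig` always holds a `MixedPrecisionQuantizationConfig`, and mixed precision counts as enabled only once the new flag is set (which, per the runner hunk further below, happens when a target KPI is supplied). A small sketch, assuming the `mct.core` aliases used elsewhere in this diff:

    import model_compression_toolkit as mct

    core_config = mct.core.CoreConfig()        # a default mixed-precision config is created
    print(core_config.mixed_precision_enable)  # False: config exists, but the flag is unset

    core_config.mixed_precision_config.set_mixed_precision_enable()
    print(core_config.mixed_precision_enable)  # True: config present and flag set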
@@ -71,7 +71,7 @@ def set_quantization_configs_to_node(node: BaseNode,
         quant_config: Quantization configuration to generate the node's configurations from.
         fw_info: Information needed for quantization about the specific framework.
         tpc: TargetPlatformCapabilities to get default OpQuantizationConfig.
-        mixed_precision_enable: is mixed precision enabled
+        mixed_precision_enable: is mixed precision enabled.
     """
     node_qc_options = node.get_qco(tpc)
 
@@ -57,7 +57,8 @@ def graph_preparation_runner(in_model: Any,
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
         tpc: TargetPlatformCapabilities object that models the inference target platform and
             the attached framework operator's information.
-        tb_w: TensorboardWriter object for logging
+        tb_w: TensorboardWriter object for logging.
+        mixed_precision_enable: is mixed precision enabled.
 
     Returns:
         An internal graph representation of the input model.
@@ -103,7 +104,7 @@ def get_finalized_graph(initial_graph: Graph,
             kernel channels indices, groups of layers by how they should be quantized, etc.)
         tb_w (TensorboardWriter): TensorboardWriter object to use for logging events such as graphs, histograms, etc.
         fw_impl (FrameworkImplementation): FrameworkImplementation object with a specific framework methods implementation.
-        mixed_precision_enable: is mixed precision enabled.
+        mixed_precision_enable: is mixed precision enabled.
 
     Returns: Graph object that represents the model, after applying all required modifications to it.
     """
@@ -38,7 +38,7 @@ if FOUND_TORCH:
 
     def pytorch_kpi_data(in_model: Module,
                          representative_data_gen: Callable,
-                         core_config: CoreConfig = CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfig()),
+                         core_config: CoreConfig = CoreConfig(),
                          target_platform_capabilities: TargetPlatformCapabilities = PYTORCH_DEFAULT_TPC) -> KPI:
         """
         Computes KPI data that can be used to calculate the desired target KPI for mixed-precision quantization.
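With the default `CoreConfig()` now sufficient, computing KPI data for a PyTorch model no longer requires building a mixed-precision config by hand. A short usage sketch; the `mct.core.pytorch_kpi_data` alias follows the `core/__init__.py` import shown earlier, and the `weights_memory` attribute/keyword is an assumption about the KPI class that is not shown in this diff:

    import numpy as np
    import torch.nn as nn
    import model_compression_toolkit as mct

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 6 * 6, 10))

    def repr_datagen():  # representative data generator yielding a list of inputs
        yield [np.random.randn(1, 3, 8, 8).astype(np.float32)]

    # The default core_config argument is enough; scale the reported weights memory
    # into a target KPI for a later mixed-precision run.
    kpi_data = mct.core.pytorch_kpi_data(model, repr_datagen)
    target_kpi = mct.core.KPI(weights_memory=kpi_data.weights_memory * 0.75)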
@@ -47,6 +47,7 @@ def core_runner(in_model: Any,
                 fw_info: FrameworkInfo,
                 fw_impl: FrameworkImplementation,
                 tpc: TargetPlatformCapabilities,
+                target_kpi: KPI = None,
                 tb_w: TensorboardWriter = None):
    """
    Quantize a trained model using post-training quantization.
@@ -66,6 +67,7 @@
        fw_impl: FrameworkImplementation object with a specific framework methods implementation.
        tpc: TargetPlatformCapabilities object that models the inference target platform and
            the attached framework operator's information.
+       target_kpi: KPI to constraint the search of the mixed-precision configuration for the model.
        tb_w: TensorboardWriter object for logging
 
    Returns:
@@ -81,6 +83,13 @@
        Logger.warning('representative_data_gen generates a batch size of 1 which can be slow for optimization:'
                       ' consider increasing the batch size')
 
+    # Checking whether to run mixed precision quantization
+    if target_kpi is not None:
+        if core_config.mixed_precision_config is None:
+            Logger.critical("Provided an initialized target_kpi, that means that mixed precision quantization is "
+                            "enabled, but the provided MixedPrecisionQuantizationConfig is None.")
+        core_config.mixed_precision_config.set_mixed_precision_enable()
+
    graph = graph_preparation_runner(in_model,
                                     representative_data_gen,
                                     core_config.quantization_config,
@@ -105,13 +114,12 @@
    # Finalize bit widths
    ######################################
    if core_config.mixed_precision_enable:
-       if core_config.mixed_precision_config.target_kpi is None:
-           Logger.critical(f"Trying to run Mixed Precision quantization without providing a valid target KPI.")
        if core_config.mixed_precision_config.configuration_overwrite is None:
 
            bit_widths_config = search_bit_width(tg,
                                                 fw_info,
                                                 fw_impl,
+                                                target_kpi,
                                                 core_config.mixed_precision_config,
                                                 representative_data_gen,
                                                 hessian_info_service=hessian_info_service)
@@ -12,7 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+
 from model_compression_toolkit.constants import FOUND_TORCH, FOUND_TF
+from model_compression_toolkit.data_generation.common.data_generation_config import DataGenerationConfig
+from model_compression_toolkit.data_generation.common.enums import ImageGranularity, DataInitType, SchedulerType, BNLayerWeightingType, OutputLossType, BatchNormAlignemntLossType, ImagePipelineType, ImageNormalizationType
 
 if FOUND_TF:
     from model_compression_toolkit.data_generation.keras.keras_data_generation import (
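Because the data-generation enums are now re-exported from the package's `data_generation` namespace, they can be referenced without importing the internal `enums` module directly. A brief illustration:

    import model_compression_toolkit as mct

    granularity = mct.data_generation.ImageGranularity.BatchWise
    scheduler = mct.data_generation.SchedulerType.REDUCE_ON_PLATEAU
    print(granularity, scheduler)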
@@ -29,10 +29,14 @@ class EnumBaseClass(Enum):
 
 class ImageGranularity(EnumBaseClass):
     """
-    An enum for choosing the image dependence granularity when generating images.
-    0. ImageWise
-    1. BatchWise
-    2. AllImages
+    An enum for choosing the image dependence granularity when generating images:
+
+    ImageWise
+
+    BatchWise
+
+    AllImages
+
     """
 
     ImageWise = 0
@@ -42,9 +46,12 @@ class ImageGranularity(EnumBaseClass):
 
 class DataInitType(EnumBaseClass):
     """
-    An enum for choosing the image dependence granularity when generating images.
-    0. Gaussian
-    1. Diverse
+    An enum for choosing the image dependence granularity when generating images:
+
+    Gaussian
+
+    Diverse
+
     """
     Gaussian = 0
     Diverse = 1
@@ -52,9 +59,14 @@ class DataInitType(EnumBaseClass):
 
 class ImagePipelineType(EnumBaseClass):
     """
-    An enum for choosing the image pipeline type for image manipulation.
-    RANDOM_CROP_FLIP: Crop and flip the images.
-    IDENTITY: Do not apply any manipulation (identity transformation).
+    An enum for choosing the image pipeline type for image manipulation:
+
+    RANDOM_CROP - Crop the images.
+
+    RANDOM_CROP_FLIP - Crop and flip the images.
+
+    IDENTITY - Do not apply any manipulation (identity transformation).
+
     """
     RANDOM_CROP = 'random_crop'
     RANDOM_CROP_FLIP = 'random_crop_flip'
@@ -63,10 +75,14 @@ class ImagePipelineType(EnumBaseClass):
 
 class ImageNormalizationType(EnumBaseClass):
     """
-    An enum for choosing the image normalization type.
-    TORCHVISION: Normalize the images using torchvision normalization.
-    KERAS_APPLICATIONS: Normalize the images using keras_applications imagenet normalization.
-    NO_NORMALIZATION: Do not apply any normalization.
+    An enum for choosing the image normalization type:
+
+    TORCHVISION - Normalize the images using torchvision normalization.
+
+    KERAS_APPLICATIONS - Normalize the images using keras_applications imagenet normalization.
+
+    NO_NORMALIZATION - Do not apply any normalization.
+
     """
     TORCHVISION = 'torchvision'
     KERAS_APPLICATIONS = 'keras_applications'
@@ -75,10 +91,14 @@ class ImageNormalizationType(EnumBaseClass):
 
 class BNLayerWeightingType(EnumBaseClass):
     """
-    An enum for choosing the layer weighting type.
-    AVERAGE: Use the same weight per layer.
-    FIRST_LAYER_MULTIPLIER: Use a multiplier for the first layer, all other layers with the same weight.
-    GRAD: Use gradient-based layer weighting.
+    An enum for choosing the layer weighting type:
+
+    AVERAGE - Use the same weight per layer.
+
+    FIRST_LAYER_MULTIPLIER - Use a multiplier for the first layer, all other layers with the same weight.
+
+    GRAD - Use gradient-based layer weighting.
+
     """
     AVERAGE = 'average'
     FIRST_LAYER_MULTIPLIER = 'first_layer_multiplier'
@@ -87,18 +107,24 @@ class BNLayerWeightingType(EnumBaseClass):
 
 class BatchNormAlignemntLossType(EnumBaseClass):
     """
-    An enum for choosing the BatchNorm alignment loss type.
-    L2_SQUARE: Use L2 square loss for BatchNorm alignment.
+    An enum for choosing the BatchNorm alignment loss type:
+
+    L2_SQUARE - Use L2 square loss for BatchNorm alignment.
+
     """
     L2_SQUARE = 'l2_square'
 
 
 class OutputLossType(EnumBaseClass):
     """
-    An enum for choosing the output loss type.
-    NONE: No output loss is applied.
-    MIN_MAX_DIFF: Use min-max difference as the output loss.
-    REGULARIZED_MIN_MAX_DIFF: Use regularized min-max difference as the output loss.
+    An enum for choosing the output loss type:
+
+    NONE - No output loss is applied.
+
+    MIN_MAX_DIFF - Use min-max difference as the output loss.
+
+    REGULARIZED_MIN_MAX_DIFF - Use regularized min-max difference as the output loss.
+
     """
     NONE = 'none'
     MIN_MAX_DIFF = 'min_max_diff'
@@ -107,9 +133,12 @@ class OutputLossType(EnumBaseClass):
 
 class SchedulerType(EnumBaseClass):
     """
-    An enum for choosing the scheduler type for the optimizer.
-    REDUCE_ON_PLATEAU: Use the ReduceOnPlateau scheduler.
-    STEP: Use the Step scheduler.
+    An enum for choosing the scheduler type for the optimizer:
+
+    REDUCE_ON_PLATEAU - Use the ReduceOnPlateau scheduler.
+
+    STEP - Use the Step scheduler.
+
     """
     REDUCE_ON_PLATEAU = 'reduce_on_plateau'
     STEP = 'step'
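These enum values are meant to be passed into the data-generation configuration helpers shown in the docstring examples below. A hedged sketch, assuming `get_pytorch_data_generation_config` accepts keyword arguments named after these options (only `n_iter` and `data_gen_batch_size` appear in this diff; the remaining parameter names are assumptions):

    import model_compression_toolkit as mct

    config = mct.data_generation.get_pytorch_data_generation_config(
        n_iter=1,
        data_gen_batch_size=2,
        image_granularity=mct.data_generation.ImageGranularity.AllImages,                  # assumed kwarg
        scheduler_type=mct.data_generation.SchedulerType.REDUCE_ON_PLATEAU,                # assumed kwarg
        output_loss_type=mct.data_generation.OutputLossType.REGULARIZED_MIN_MAX_DIFF)      # assumed kwarg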
@@ -131,7 +131,36 @@ if FOUND_TF:
 
         Returns:
             List[tf.Tensor]: Finalized list containing generated images.
+
+        Examples:
+
+            In this example, we'll walk through generating images using a simple Keras model and a data generation configuration. The process involves creating a model, setting up a data generation configuration, and finally generating images with specified parameters.
+
+            Start by importing the Model Compression Toolkit (MCT), TensorFlow, and some layers from `tensorflow.keras`:
+
+            >>> import model_compression_toolkit as mct
+            >>> from tensorflow.keras.models import Sequential
+            >>> from tensorflow.keras.layers import Conv2D, BatchNormalization, Flatten, Dense, Reshape
+
+            Next, define a simple Keras model:
+
+            >>> model = Sequential([Conv2D(2, 3, input_shape=(8,8,3)), BatchNormalization(), Flatten(), Dense(10)])
+
+            Configure the data generation process using `get_keras_data_generation_config`. This function allows customization of the data generation process. For simplicity, this example sets the number of iterations (`n_iter`) to 1 and the batch size (`data_gen_batch_size`) to 2.
+
+            >>> config = mct.data_generation.get_keras_data_generation_config(n_iter=1, data_gen_batch_size=2)
+
+            Finally, use the `keras_data_generation_experimental` function to generate images based on the model and data generation configuration.
+            Notice that this function is experimental and may change in future versions of MCT.
+            The `n_images` parameter specifies the number of images to generate, and `output_image_size` sets the size of the generated images.
+
+            >>> generated_images = mct.data_generation.keras_data_generation_experimental(model=model, n_images=4, output_image_size=(8, 8), data_generation_config=config)
+
+            The generated images can then be used for various purposes, such as data-free quantization.
+
+
        """
+
        Logger.warning(f"keras_data_generation_experimental is experimental "
                       f"and is subject to future changes."
                       f"If you encounter an issue, please open an issue in our GitHub "
@@ -129,7 +129,7 @@ if FOUND_TORCH:
     def pytorch_data_generation_experimental(
             model: Module,
             n_images: int,
-            output_image_size: Tuple,
+            output_image_size: int,
             data_generation_config: DataGenerationConfig) -> List[Tensor]:
         """
         Function to perform data generation using the provided model and data generation configuration.
@@ -137,11 +137,38 @@ if FOUND_TORCH:
         Args:
             model (Module): PyTorch model to generate data for.
             n_images (int): Number of images to generate.
-            output_image_size (Tuple): Size of the output images.
+            output_image_size (int): The hight and width size of the output images.
             data_generation_config (DataGenerationConfig): Configuration for data generation.
 
         Returns:
             List[Tensor]: Finalized list containing generated images.
+
+        Examples:
+
+            In this example, we'll walk through generating images using a simple PyTorch model and a data generation configuration. The process involves creating a model, setting up a data generation configuration, and finally generating images with specified parameters.
+
+            Start by importing the Model Compression Toolkit (MCT), PyTorch, and some modules from `torch.nn`:
+
+            >>> import model_compression_toolkit as mct
+            >>> import torch.nn as nn
+            >>> from torch.nn import Conv2d, BatchNorm2d, Flatten, Linear
+
+            Next, define a simple PyTorch model:
+
+            >>> model = nn.Sequential(nn.Conv2d(3, 2, 3), nn.BatchNorm2d(2), nn.Flatten(), nn.Linear(2*6*6, 10))
+
+            Configure the data generation process using `get_pytorch_data_generation_config`. This function allows customization of the data generation process. For simplicity, this example sets the number of iterations (`n_iter`) to 1 and the batch size (`data_gen_batch_size`) to 2.
+
+            >>> config = mct.data_generation.get_pytorch_data_generation_config(n_iter=1, data_gen_batch_size=2)
+
+            Finally, use the `pytorch_data_generation_experimental` function to generate images based on the model and data generation configuration.
+            Notice that this function is experimental and may change in future versions of MCT.
+            The `n_images` parameter specifies the number of images to generate, and `output_image_size` sets the size of the generated images.
+
+            >>> generated_images = mct.data_generation.pytorch_data_generation_experimental(model=model, n_images=4, output_image_size=8, data_generation_config=config)
+
+            The generated images can then be used for various purposes, such as data-free quantization.
+
        """
 
        Logger.warning(f"pytorch_data_generation_experimental is experimental "
@@ -16,6 +16,16 @@ from enum import Enum
 
 
 class QuantizationFormat(Enum):
+    """
+    Specify which quantization format to use for exporting a quantized model.
+
+    FAKELY_QUANT - Weights and activations are quantized but represented using float data type.
+
+    INT8 - Weights and activations are represented using 8-bit integer data type.
+
+    MCTQ - Weights and activations are quantized using mct_quantizers custom quantizers.
+
+    """
    FAKELY_QUANT = 0
    INT8 = 1
    MCTQ = 2
@@ -42,21 +42,17 @@ if FOUND_TF:
                            serialization_format: KerasExportSerializationFormat = KerasExportSerializationFormat.KERAS,
                            quantization_format : QuantizationFormat = QuantizationFormat.MCTQ) -> Dict[str, type]:
        """
-       Export a Keras quantized model to a h5 or tflite model.
+       Export a Keras quantized model to a .keras or .tflite format model (according to serialization_format).
        The model will be saved to the path in save_model_path.
-       keras_export_model supports the combination of QuantizationFormat.FAKELY_QUANT (where weights
-       and activations are float fakely-quantized values) and KerasExportSerializationFormat.KERAS_H5 (where the model
-       will be saved to h5 model) or the combination of KerasExportSerializationFormat.TFLITE (where the model will be
-       saved to tflite model) with QuantizationFormat.FAKELY_QUANT or QuantizationFormat.INT8 (where weights and
-       activations are represented using 8bits integers).
+       Models that are exported to .keras format can use quantization_format of QuantizationFormat.MCTQ or QuantizationFormat.FAKELY_QUANT.
+       Models that are exported to .tflite format can use quantization_format of QuantizationFormat.INT8 or QuantizationFormat.FAKELY_QUANT.

        Args:
            model: Model to export.
            save_model_path: Path to save the model.
            is_layer_exportable_fn: Callable to check whether a layer can be exported or not.
-           serialization_format: Format to export the model according to (by default
-           KerasExportSerializationFormat.KERAS_H5).
-           quantization_format: Format of how quantizers are exported (fakely-quant, int8, MCTQ quantizers).
+           serialization_format: Format to export the model according to (KerasExportSerializationFormat.KERAS, by default).
+           quantization_format: Format of how quantizers are exported (MCTQ quantizers, by default).

        Returns:
            Custom objects dictionary needed to load the model.
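Putting the two exporter hunks together: the serialization format chooses the file type, and it constrains which quantization formats are valid. A usage sketch based on the docstring above, assuming `quantized_model` is a model returned by one of the MCT quantization facades and that the enums are exposed under `mct.exporter` with the names used in this diff:

    import model_compression_toolkit as mct

    # .keras file with MCTQ quantizers (the new defaults per this diff).
    mct.exporter.keras_export_model(model=quantized_model,
                                    save_model_path='qmodel.keras')

    # .tflite file with 8-bit integer weights and activations.
    mct.exporter.keras_export_model(
        model=quantized_model,
        save_model_path='qmodel.tflite',
        serialization_format=mct.exporter.KerasExportSerializationFormat.TFLITE,
        quantization_format=mct.exporter.QuantizationFormat.INT8)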
@@ -19,9 +19,12 @@ from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
 
 class RoundingType(Enum):
     """
-    An enum for choosing the GPTQ rounding methods
-    0. STRAIGHT-THROUGH ESTIMATOR
-    1. SoftQuantizer
+    An enum for choosing the GPTQ rounding methods:
+
+    STE - STRAIGHT-THROUGH ESTIMATOR
+
+    SoftQuantizer - SoftQuantizer
+
     """
     STE = 0
     SoftQuantizer = 1
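`RoundingType` selects how GPTQ rounds weights during fine-tuning. A minimal illustration; whether `RoundingType` is re-exported under `mct.gptq`, and how it is wired into a GPTQ config, are assumptions not shown in this hunk:

    import model_compression_toolkit as mct

    rounding = mct.gptq.RoundingType.SoftQuantizer  # assumed public alias; STE is the alternative

    # The helper used in the docstring examples below builds a GPTQ config with defaults.
    gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=1)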
@@ -116,6 +116,7 @@ if FOUND_TF:
    def keras_gradient_post_training_quantization(in_model: Model, representative_data_gen: Callable,
                                                  gptq_config: GradientPTQConfig,
                                                  gptq_representative_data_gen: Callable = None,
+                                                 target_kpi: KPI = None,
                                                  core_config: CoreConfig = CoreConfig(),
                                                  target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
        """
@@ -139,6 +140,7 @@ if FOUND_TF:
            representative_data_gen (Callable): Dataset used for calibration.
            gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
            gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
+           target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
            core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.

@@ -166,6 +168,12 @@ if FOUND_TF:

        >>> config = mct.core.CoreConfig()

+       If mixed precision is desired, create an MCT core config with a mixed-precision configuration, to quantize a model
+       with different bitwidths for different layers.
+       The candidates bitwidth for quantization should be defined in the target platform model:
+
+       >>> config = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=1))
+
        For mixed-precision set a target KPI object:
        Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
        that should be quantized (for example, the kernel of Conv2D in Keras will be affected by this value,
@@ -173,19 +181,13 @@ if FOUND_TF:

        >>> kpi = mct.core.KPI(model.count_params() * 0.75) # About 0.75 of the model size when quantized with 8 bits.

-       If mixed precision is desired, create an MCT core config with a mixed-precision configuration, to quantize a model
-       with different bitwidths for different layers.
-       The candidates bitwidth for quantization should be defined in the target platform model:
-
-       >>> config = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=1, target_kpi=kpi))
-
        Create GPTQ config:

        >>> gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=1)

        Pass the model with the representative dataset generator to get a quantized model:

-       >>> quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(model, repr_datagen, gptq_config, core_config=config)
+       >>> quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(model, repr_datagen, gptq_config, target_kpi=kpi, core_config=config)

        """
        KerasModelValidation(model=in_model,
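Stitched together from the docstring snippets in this hunk, the new calling convention passes the KPI directly to the GPTQ facade rather than through the mixed-precision config. A condensed sketch with a stand-in Keras model (the docstring's own model choice is not shown here):

    import numpy as np
    import model_compression_toolkit as mct
    from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2

    model = MobileNetV2()

    def repr_datagen():
        yield [np.random.randn(1, 224, 224, 3)]

    config = mct.core.CoreConfig(
        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=1))
    kpi = mct.core.KPI(model.count_params() * 0.75)  # about 0.75 of the 8-bit model size
    gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=1)

    quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(
        model, repr_datagen, gptq_config, target_kpi=kpi, core_config=config)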