mct-nightly: 1.11.0.20240307.post318.tar.gz → 1.11.0.20240309.post349.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (484)
  1. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/PKG-INFO +13 -13
  2. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/README.md +12 -12
  3. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/mct_nightly.egg-info/PKG-INFO +13 -13
  4. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/__init__.py +0 -19
  5. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/network_editors/actions.py +1 -1
  6. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_config.py +1 -1
  7. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +4 -2
  8. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/__init__.py +1 -1
  9. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +11 -6
  10. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +6 -0
  11. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/__init__.py +1 -1
  12. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/gptq_config.py +1 -4
  13. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +5 -1
  14. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/pruning/keras/pruning_facade.py +5 -1
  15. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +5 -1
  16. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/__init__.py +2 -2
  17. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantization_facade.py +25 -15
  18. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantization_facade.py +24 -15
  19. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/setup.cfg +1 -1
  20. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/LICENSE.md +0 -0
  21. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/mct_nightly.egg-info/SOURCES.txt +0 -0
  22. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/mct_nightly.egg-info/dependency_links.txt +0 -0
  23. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/mct_nightly.egg-info/requires.txt +0 -0
  24. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/mct_nightly.egg-info/top_level.txt +0 -0
  25. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/constants.py +0 -0
  26. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/__init__.py +0 -0
  27. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/analyzer.py +0 -0
  28. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/__init__.py +0 -0
  29. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  30. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  31. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  32. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  33. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  34. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  35. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  36. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  37. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  38. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/data_loader.py +0 -0
  39. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  40. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/framework_info.py +0 -0
  41. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  42. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  43. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  44. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  45. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  46. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  47. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  48. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  49. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  50. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  51. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  52. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  53. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  54. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  55. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  56. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  57. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  58. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  59. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  60. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  61. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
  62. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
  63. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  64. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  65. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  66. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  67. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  68. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  69. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  70. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  71. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  72. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  73. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  74. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  75. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  76. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/__init__.py +0 -0
  77. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi.py +0 -0
  78. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_aggregation_methods.py +0 -0
  79. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +0 -0
  80. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py +0 -0
  81. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +0 -0
  82. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  83. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  84. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  85. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  86. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  87. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  88. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  89. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  90. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  91. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/model_collector.py +0 -0
  92. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/model_validation.py +0 -0
  93. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  94. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  95. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  96. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  97. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  98. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  99. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  100. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  101. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  102. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  103. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  104. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  105. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  106. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  107. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  108. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  109. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  110. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  111. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  112. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  113. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  114. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  115. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  116. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  117. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  118. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  119. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  120. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  121. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  122. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  123. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  124. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  125. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  126. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  127. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  128. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  129. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  130. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  131. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  132. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  133. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  134. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  135. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  136. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  137. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  138. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  139. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  140. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  141. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  142. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  143. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  144. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  145. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  146. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  147. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  148. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  149. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  150. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  151. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  152. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  153. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  154. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  155. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  156. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  157. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  158. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  159. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/user_info.py +0 -0
  160. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  161. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  162. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  163. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  164. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/exporter.py +0 -0
  165. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  166. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/__init__.py +0 -0
  167. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  168. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  169. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  170. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  171. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  172. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  173. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  174. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/constants.py +0 -0
  175. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  176. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  177. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  178. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  179. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  180. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  181. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  182. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  183. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  184. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  185. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  186. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  187. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  188. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  189. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +0 -0
  190. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  191. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  192. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  193. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  194. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  195. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  196. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  197. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
  198. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +0 -0
  199. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
  200. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  201. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  202. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  203. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/kpi_data_facade.py +0 -0
  204. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  205. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  206. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  207. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  208. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  209. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  210. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  211. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  212. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  213. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  214. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  215. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  216. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  217. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  218. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  219. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  220. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  221. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  222. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  223. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  224. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  225. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  226. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  227. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  228. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  229. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  230. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  231. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  232. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  233. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  234. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  235. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  236. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  237. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  238. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  239. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  240. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  241. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  242. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  243. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  244. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  245. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  246. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  247. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  248. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  249. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  250. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
  251. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  252. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  253. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  254. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  255. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  256. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  257. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  258. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  259. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  260. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
  261. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +0 -0
  262. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
  263. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/kpi_data_facade.py +0 -0
  264. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  265. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  266. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  267. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  268. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  269. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  270. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  271. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  272. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  273. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  274. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  275. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  276. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  277. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  278. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  279. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  280. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  281. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  282. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  283. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/core/runner.py +0 -0
  284. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  285. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  286. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  287. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  288. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  289. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  290. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  291. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  292. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  293. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  294. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  295. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  296. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  297. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  298. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  299. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  300. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  301. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  302. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  303. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  304. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  305. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  306. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  307. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  308. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  309. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  310. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  311. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  312. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  313. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  314. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/defaultdict.py +0 -0
  315. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/__init__.py +0 -0
  316. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  317. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  318. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  319. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  320. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  321. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  322. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  323. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  324. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  325. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  326. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  327. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  328. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  329. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  330. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  331. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  332. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  333. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  334. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  335. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  336. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  337. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  338. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  339. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  340. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  341. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  342. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  343. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  344. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  345. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  346. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  347. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  348. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  349. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  350. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  351. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  352. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  353. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  354. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  355. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  356. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  357. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
  358. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  359. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  360. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  361. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  362. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  363. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  364. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  365. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  366. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  367. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  368. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  369. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  370. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  371. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  372. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
  373. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  374. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  375. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  376. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  377. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  378. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
  379. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  380. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  381. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  382. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  383. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  384. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  385. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/gptq/runner.py +0 -0
  386. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/logger.py +0 -0
  387. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/pruning/__init__.py +0 -0
  388. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  389. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  390. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/__init__.py +0 -0
  391. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  392. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  393. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  394. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  395. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/ptq/runner.py +0 -0
  396. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/common/__init__.py +0 -0
  397. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  398. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  399. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  400. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  401. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  402. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  403. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  404. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  405. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  406. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  407. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  408. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  409. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  410. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  411. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
  412. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  413. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  414. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  415. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  416. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
  417. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  418. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  419. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  420. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  421. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  422. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  423. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  424. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  425. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  426. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  427. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  428. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  429. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  430. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  431. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  432. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  433. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  434. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  435. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  436. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  437. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  438. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  439. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  440. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  441. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  442. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  443. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  444. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  445. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  446. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  447. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  448. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  449. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  450. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  451. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  452. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  453. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  454. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  455. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  456. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  457. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  458. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  459. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  460. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  461. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  462. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  463. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  464. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  465. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  466. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  467. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  468. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  469. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  470. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  471. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  472. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  473. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  474. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  475. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  476. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  477. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  478. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  479. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  480. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  481. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  482. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  483. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  484. {mct-nightly-1.11.0.20240307.post318 → mct-nightly-1.11.0.20240309.post349}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mct-nightly
3
- Version: 1.11.0.20240307.post318
3
+ Version: 1.11.0.20240309.post349
4
4
  Summary: A Model Compression Toolkit for neural networks
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -74,7 +74,7 @@ Description: # Model Compression Toolkit (MCT)
74
74
  ## Supported Features
75
75
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:
76
76
 
77
- ### Data Generation
77
+ ### Data Generation [*](#experimental-features)
78
78
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
79
79
  You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
80
80
 
@@ -82,7 +82,7 @@ Description: # Model Compression Toolkit (MCT)
82
82
  MCT supports different quantization methods:
83
83
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
84
84
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
85
- * Quantization-aware training (QAT)[*](#experimental-features)
85
+ * Quantization-aware training (QAT) [*](#experimental-features)
86
86
 
87
87
 
88
88
  | Quantization Method | Complexity | Computational Cost |
@@ -117,6 +117,15 @@ Description: # Model Compression Toolkit (MCT)
117
117
  More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
118
118
 
119
119
 
120
+ ### Structured Pruning [*](#experimental-features)
121
+ MCT introduces a structured and hardware-aware model pruning.
122
+ This pruning technique is designed to compress models for specific hardware architectures,
123
+ taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
124
+ By pruning groups of channels (SIMD groups), our approach not only reduces model size
125
+ and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
126
+ for a target KPI of weights memory footprint.
127
+ [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
128
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
120
129
 
121
130
  #### Experimental features
122
131
 
@@ -146,17 +155,8 @@ Description: # Model Compression Toolkit (MCT)
146
155
 
147
156
  For more results, please refer to [quick start](https://github.com/sony/model_optimization/tree/main/tutorials/quick_start).
148
157
 
149
- ### Structured Pruning
150
- MCT introduces a structured and hardware-aware model pruning.
151
- This pruning technique is designed to compress models for specific hardware architectures,
152
- taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
153
- By pruning groups of channels (SIMD groups), our approach not only reduces model size
154
- and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
155
- for a target KPI of weights memory footprint.
156
- [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
157
- [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
158
158
 
159
- #### Results
159
+ #### Pruning Results
160
160
 
161
161
  Results for applying pruning to reduce the parameters of the following models by 50%:
162
162
 
@@ -68,7 +68,7 @@ Currently, MCT is being tested on various Python, Pytorch and TensorFlow version
68
68
  ## Supported Features
69
69
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:
70
70
 
71
- ### Data Generation
71
+ ### Data Generation [*](#experimental-features)
72
72
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
73
73
  You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
74
74
 
@@ -76,7 +76,7 @@ You can customize data generation configurations to suit your specific needs. [G
76
76
  MCT supports different quantization methods:
77
77
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
78
78
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
79
- * Quantization-aware training (QAT)[*](#experimental-features)
79
+ * Quantization-aware training (QAT) [*](#experimental-features)
80
80
 
81
81
 
82
82
  | Quantization Method | Complexity | Computational Cost |
@@ -111,6 +111,15 @@ The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhance
111
111
  More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
112
112
 
113
113
 
114
+ ### Structured Pruning [*](#experimental-features)
115
+ MCT introduces a structured and hardware-aware model pruning.
116
+ This pruning technique is designed to compress models for specific hardware architectures,
117
+ taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
118
+ By pruning groups of channels (SIMD groups), our approach not only reduces model size
119
+ and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
120
+ for a target KPI of weights memory footprint.
121
+ [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
122
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
114
123
 
115
124
  #### Experimental features
116
125
 
@@ -140,17 +149,8 @@ In the following table we present the ImageNet validation results for these mode
140
149
 
141
150
  For more results, please refer to [quick start](https://github.com/sony/model_optimization/tree/main/tutorials/quick_start).
142
151
 
143
- ### Structured Pruning
144
- MCT introduces a structured and hardware-aware model pruning.
145
- This pruning technique is designed to compress models for specific hardware architectures,
146
- taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
147
- By pruning groups of channels (SIMD groups), our approach not only reduces model size
148
- and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
149
- for a target KPI of weights memory footprint.
150
- [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
151
- [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
152
152
 
153
- #### Results
153
+ #### Pruning Results
154
154
 
155
155
  Results for applying pruning to reduce the parameters of the following models by 50%:
156
156
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mct-nightly
3
- Version: 1.11.0.20240307.post318
3
+ Version: 1.11.0.20240309.post349
4
4
  Summary: A Model Compression Toolkit for neural networks
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -74,7 +74,7 @@ Description: # Model Compression Toolkit (MCT)
74
74
  ## Supported Features
75
75
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:
76
76
 
77
- ### Data Generation
77
+ ### Data Generation [*](#experimental-features)
78
78
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
79
79
  You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
80
80
 
@@ -82,7 +82,7 @@ Description: # Model Compression Toolkit (MCT)
82
82
  MCT supports different quantization methods:
83
83
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_post_training_quantization_experimental.html#ug-keras-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_post_training_quantization_experimental.html#ug-pytorch-post-training-quantization-experimental)
84
84
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_gradient_post_training_quantization_experimental.html#ug-keras-gradient-post-training-quantization-experimental), [PyTorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_gradient_post_training_quantization_experimental.html#ug-pytorch-gradient-post-training-quantization-experimental)
85
- * Quantization-aware training (QAT)[*](#experimental-features)
85
+ * Quantization-aware training (QAT) [*](#experimental-features)
86
86
 
87
87
 
88
88
  | Quantization Method | Complexity | Computational Cost |
@@ -117,6 +117,15 @@ Description: # Model Compression Toolkit (MCT)
117
117
  More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
118
118
 
119
119
 
120
+ ### Structured Pruning [*](#experimental-features)
121
+ MCT introduces a structured and hardware-aware model pruning.
122
+ This pruning technique is designed to compress models for specific hardware architectures,
123
+ taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
124
+ By pruning groups of channels (SIMD groups), our approach not only reduces model size
125
+ and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
126
+ for a target KPI of weights memory footprint.
127
+ [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
128
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
120
129
 
121
130
  #### Experimental features
122
131
 
@@ -146,17 +155,8 @@ Description: # Model Compression Toolkit (MCT)
146
155
 
147
156
  For more results, please refer to [quick start](https://github.com/sony/model_optimization/tree/main/tutorials/quick_start).
148
157
 
149
- ### Structured Pruning
150
- MCT introduces a structured and hardware-aware model pruning.
151
- This pruning technique is designed to compress models for specific hardware architectures,
152
- taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
153
- By pruning groups of channels (SIMD groups), our approach not only reduces model size
154
- and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
155
- for a target KPI of weights memory footprint.
156
- [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
157
- [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
158
158
 
159
- #### Results
159
+ #### Pruning Results
160
160
 
161
161
  Results for applying pruning to reduce the parameters of the following models by 50%:
162
162
 
@@ -27,23 +27,4 @@ from model_compression_toolkit import data_generation
27
27
  from model_compression_toolkit import pruning
28
28
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
29
29
 
30
-
31
- # Old API (will not be accessible in future releases)
32
- from model_compression_toolkit.core.common import network_editors as network_editor
33
- from model_compression_toolkit.core.common.quantization import quantization_config
34
- from model_compression_toolkit.core.common.mixed_precision import mixed_precision_quantization_config
35
- from model_compression_toolkit.core.common.quantization.debug_config import DebugConfig
36
- from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, QuantizationErrorMethod, DEFAULTCONFIG
37
- from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
38
- from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
39
- from model_compression_toolkit.logger import set_log_folder
40
- from model_compression_toolkit.core.common.data_loader import FolderImageLoader
41
- from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
42
- from model_compression_toolkit.core.keras.kpi_data_facade import keras_kpi_data
43
- from model_compression_toolkit.core.pytorch.kpi_data_facade import pytorch_kpi_data
44
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
45
- from model_compression_toolkit.gptq.common.gptq_config import RoundingType
46
- from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
47
- from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config
48
-
49
30
  __version__ = "1.11.0"
@@ -43,7 +43,7 @@ class EditRule(_EditRule):
43
43
  >>> import model_compression_toolkit as mct
44
44
  >>> from model_compression_toolkit.core.keras.constants import KERNEL
45
45
  >>> from tensorflow.keras.layers import Conv2D
46
- >>> er_list = [mct.network_editor.EditRule(filter=mct.network_editor.NodeTypeFilter(Conv2D), action=mct.network_editor.ChangeCandidatesWeightsQuantConfigAttr(attr_name=KERNEL, weights_n_bits=9))]
46
+ >>> er_list = [mct.core.network_editor.EditRule(filter=mct.core.network_editor.NodeTypeFilter(Conv2D), action=mct.core.network_editor.ChangeCandidatesWeightsQuantConfigAttr(attr_name=KERNEL, weights_n_bits=9))]
47
47
 
48
48
  Then the rules list can be passed to :func:`~model_compression_toolkit.keras_post_training_quantization`
49
49
  to modify the network during the quantization process.
@@ -82,7 +82,7 @@ class QuantizationConfig:
82
82
  block_collapsing (bool): Whether to collapse block one to another in the input network
83
83
  shift_negative_ratio (float): Value for the ratio between the minimal negative value of a non-linearity output to its activation threshold, which above it - shifting negative activation should occur if enabled.
84
84
  shift_negative_threshold_recalculation (bool): Whether or not to recompute the threshold after shifting negative activation.
85
- shift_negative_params_search (bool): Whether to search for optimal shift and threshold in shift negative activation (experimental)
85
+ shift_negative_params_search (bool): Whether to search for optimal shift and threshold in shift negative activation.
86
86
 
87
87
  Examples:
88
88
  One may create a quantization configuration to quantize a model according to.
@@ -75,8 +75,10 @@ class SeparableConvDecomposition(common.BaseSubstitution):
75
75
  pw_bias = separable_node.get_weights_by_keys(BIAS)
76
76
 
77
77
  dw_weights_dict = {DEPTHWISE_KERNEL: dw_kernel}
78
- pw_weights_dict = {KERNEL: pw_kernel,
79
- BIAS: pw_bias}
78
+ pw_weights_dict = {KERNEL: pw_kernel}
79
+
80
+ if pw_bias is not None:
81
+ pw_weights_dict[BIAS] = pw_bias
80
82
 
81
83
  # Split separable node attributes into relevant attributes for each of the new nodes.
82
84
  # List of dw attributes that should take from separable as they are.
@@ -16,7 +16,7 @@ from model_compression_toolkit.constants import FOUND_TORCH, FOUND_TF
16
16
 
17
17
  if FOUND_TF:
18
18
  from model_compression_toolkit.data_generation.keras.keras_data_generation import (
19
- tensorflow_data_generation_experimental, get_tensorflow_data_generation_config)
19
+ keras_data_generation_experimental, get_keras_data_generation_config)
20
20
 
21
21
  if FOUND_TORCH:
22
22
  from model_compression_toolkit.data_generation.pytorch.pytorch_data_generation import (
@@ -49,7 +49,7 @@ if FOUND_TF:
49
49
  scheduler_step_function_dict
50
50
 
51
51
  # Function to create a DataGenerationConfig object with the specified configuration parameters for Tensorflow
52
- def get_tensorflow_data_generation_config(
52
+ def get_keras_data_generation_config(
53
53
  n_iter: int = DEFAULT_N_ITER,
54
54
  optimizer: Optimizer = Adam,
55
55
  data_gen_batch_size: int = DEFAULT_DATA_GEN_BS,
@@ -115,13 +115,13 @@ if FOUND_TF:
115
115
  output_loss_multiplier=output_loss_multiplier)
116
116
 
117
117
 
118
- def tensorflow_data_generation_experimental(
118
+ def keras_data_generation_experimental(
119
119
  model: tf.keras.Model,
120
120
  n_images: int,
121
121
  output_image_size: Tuple,
122
122
  data_generation_config: DataGenerationConfig) -> tf.Tensor:
123
123
  """
124
- Function to perform data generation using the provided model and data generation configuration.
124
+ Function to perform data generation using the provided Keras model and data generation configuration.
125
125
 
126
126
  Args:
127
127
  model (Model): Keras model to generate data for.
@@ -132,6 +132,11 @@ if FOUND_TF:
132
132
  Returns:
133
133
  List[tf.Tensor]: Finalized list containing generated images.
134
134
  """
135
+ Logger.warning(f"keras_data_generation_experimental is experimental "
136
+ f"and is subject to future changes."
137
+ f"If you encounter an issue, please open an issue in our GitHub "
138
+ f"project https://github.com/sony/model_optimization")
139
+
135
140
  # Get Data Generation functions and classes
136
141
  image_pipeline, normalization, bn_layer_weighting_fn, bn_alignment_loss_fn, output_loss_fn, \
137
142
  init_dataset = get_data_generation_classes(data_generation_config=data_generation_config,
@@ -323,11 +328,11 @@ if FOUND_TF:
323
328
 
324
329
 
325
330
  else:
326
- def get_tensorflow_data_generation_config(*args, **kwargs):
327
- Logger.critical('Installing tensorflow is mandatory when using get_tensorflow_data_generation_config. '
331
+ def get_keras_data_generation_config(*args, **kwargs):
332
+ Logger.critical('Installing tensorflow is mandatory when using get_keras_data_generation_config. '
328
333
  'Could not find Tensorflow package.') # pragma: no cover
329
334
 
330
335
 
331
- def tensorflow_data_generation_experimental(*args, **kwargs):
336
+ def keras_data_generation_experimental(*args, **kwargs):
332
337
  Logger.critical('Installing tensorflow is mandatory when using pytorch_data_generation_experimental. '
333
338
  'Could not find Tensorflow package.') # pragma: no cover
@@ -143,6 +143,12 @@ if FOUND_TORCH:
143
143
  Returns:
144
144
  List[Tensor]: Finalized list containing generated images.
145
145
  """
146
+
147
+ Logger.warning(f"pytorch_data_generation_experimental is experimental "
148
+ f"and is subject to future changes."
149
+ f"If you encounter an issue, please open an issue in our GitHub "
150
+ f"project https://github.com/sony/model_optimization")
151
+
146
152
  # get a static graph representation of the model using torch.fx
147
153
  fx_model = symbolic_trace(model)
148
154
 
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfig, GPTQHessianScoresConfig
16
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GPTQHessianScoresConfig
17
17
  from model_compression_toolkit.gptq.keras.quantization_facade import keras_gradient_post_training_quantization
18
18
  from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
19
19
  from model_compression_toolkit.gptq.pytorch.quantization_facade import pytorch_gradient_post_training_quantization
@@ -36,8 +36,7 @@ class GPTQHessianScoresConfig:
36
36
  hessians_num_samples: int = 16,
37
37
  norm_scores: bool = True,
38
38
  log_norm: bool = True,
39
- scale_log_norm: bool = False,
40
- hessians_n_iter: int = 50): #TODO: remove
39
+ scale_log_norm: bool = False):
41
40
 
42
41
  """
43
42
  Initialize a GPTQHessianWeightsConfig.
@@ -47,14 +46,12 @@ class GPTQHessianScoresConfig:
47
46
  norm_scores (bool): Whether to normalize the returned scores of the weighted loss function (to get values between 0 and 1).
48
47
  log_norm (bool): Whether to use log normalization for the GPTQ Hessian-based scores.
49
48
  scale_log_norm (bool): Whether to scale the final vector of the Hessian-based scores.
50
- hessians_n_iter (int): Number of random iterations to run Hessian approximation for GPTQ Hessian-based scores.
51
49
  """
52
50
 
53
51
  self.hessians_num_samples = hessians_num_samples
54
52
  self.norm_scores = norm_scores
55
53
  self.log_norm = log_norm
56
54
  self.scale_log_norm = scale_log_norm
57
- self.hessians_n_iter = hessians_n_iter
58
55
 
59
56
 
60
57
  class GradientPTQConfig:
@@ -129,6 +129,10 @@ if FOUND_TORCH:
129
129
 
130
130
  Examples:
131
131
 
132
+ Import Model Compression Toolkit:
133
+
134
+ >>> import model_compression_toolkit as mct
135
+
132
136
  Import a Pytorch module:
133
137
 
134
138
  >>> from torchvision import models
@@ -149,7 +153,7 @@ if FOUND_TORCH:
149
153
 
150
154
  Pass the module, the representative dataset generator and the configuration (optional) to get a quantized module
151
155
 
152
- >>> quantized_module, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization_experimental(module, repr_datagen, core_config=config, gptq_config=gptq_conf)
156
+ >>> quantized_module, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization(module, repr_datagen, core_config=config, gptq_config=gptq_conf)
153
157
 
154
158
  """
155
159
 
@@ -86,7 +86,7 @@ if FOUND_TF:
86
86
  are represented in float32 data type (thus, each parameter is represented using 4 bytes):
87
87
 
88
88
  >>> dense_nparams = sum([l.count_params() for l in model.layers])
89
- >>> target_kpi = mct.KPI(weights_memory=dense_nparams * 4 * 0.5)
89
+ >>> target_kpi = mct.core.KPI(weights_memory=dense_nparams * 4 * 0.5)
90
90
 
91
91
  Optionally, define a pruning configuration. num_score_approximations can be passed
92
92
  to configure the number of importance scores that will be calculated for each channel.
@@ -101,6 +101,10 @@ if FOUND_TF:
101
101
 
102
102
  """
103
103
 
104
+ Logger.warning(f"keras_pruning_experimental is experimental and is subject to future changes."
105
+ f"If you encounter an issue, please open an issue in our GitHub "
106
+ f"project https://github.com/sony/model_optimization")
107
+
104
108
  # Instantiate the Keras framework implementation.
105
109
  fw_impl = PruningKerasImplementation()
106
110
 
@@ -93,7 +93,7 @@ if FOUND_TORCH:
93
93
  are represented in float32 data type (thus, each parameter is represented using 4 bytes):
94
94
 
95
95
  >>> dense_nparams = sum(p.numel() for p in model.state_dict().values())
96
- >>> target_kpi = mct.KPI(weights_memory=dense_nparams * 4 * 0.5)
96
+ >>> target_kpi = mct.core.KPI(weights_memory=dense_nparams * 4 * 0.5)
97
97
 
98
98
  Optionally, define a pruning configuration. num_score_approximations can be passed
99
99
  to configure the number of importance scores that will be calculated for each channel.
@@ -108,6 +108,10 @@ if FOUND_TORCH:
108
108
 
109
109
  """
110
110
 
111
+ Logger.warning(f"pytorch_pruning_experimental is experimental and is subject to future changes."
112
+ f"If you encounter an issue, please open an issue in our GitHub "
113
+ f"project https://github.com/sony/model_optimization")
114
+
111
115
  # Instantiate the Pytorch framework implementation.
112
116
  fw_impl = PruningPytorchImplementation()
113
117
 
@@ -14,5 +14,5 @@
14
14
  # ==============================================================================
15
15
  from model_compression_toolkit.qat.common.qat_config import QATConfig, TrainingMethod
16
16
 
17
- from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init, keras_quantization_aware_training_finalize
18
- from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init, pytorch_quantization_aware_training_finalize
17
+ from model_compression_toolkit.qat.keras.quantization_facade import keras_quantization_aware_training_init_experimental, keras_quantization_aware_training_finalize_experimental
18
+ from model_compression_toolkit.qat.pytorch.quantization_facade import pytorch_quantization_aware_training_init_experimental, pytorch_quantization_aware_training_finalize_experimental
@@ -85,13 +85,13 @@ if FOUND_TF:
85
85
  return layer
86
86
 
87
87
 
88
- def keras_quantization_aware_training_init(in_model: Model,
89
- representative_data_gen: Callable,
90
- target_kpi: KPI = None,
91
- core_config: CoreConfig = CoreConfig(),
92
- qat_config: QATConfig = QATConfig(),
93
- fw_info: FrameworkInfo = DEFAULT_KERAS_INFO,
94
- target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
88
+ def keras_quantization_aware_training_init_experimental(in_model: Model,
89
+ representative_data_gen: Callable,
90
+ target_kpi: KPI = None,
91
+ core_config: CoreConfig = CoreConfig(),
92
+ qat_config: QATConfig = QATConfig(),
93
+ fw_info: FrameworkInfo = DEFAULT_KERAS_INFO,
94
+ target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
95
95
  """
96
96
  Prepare a trained Keras model for quantization aware training. First the model quantization is optimized
97
97
  with post-training quantization, then the model layers are wrapped with QuantizeWrappers. The model is
@@ -161,7 +161,7 @@ if FOUND_TF:
161
161
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
162
162
  quantized model:
163
163
 
164
- >>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
164
+ >>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init_experimental(model, repr_datagen, kpi, core_config=config)
165
165
 
166
166
  Use the quantized model for fine-tuning. For loading the model from file, use the custom_objects dictionary:
167
167
 
@@ -170,6 +170,11 @@ if FOUND_TF:
170
170
  For more configuration options, please take a look at our `API documentation <https://sony.github.io/model_optimization/api/api_docs/modules/mixed_precision_quantization_config.html>`_.
171
171
 
172
172
  """
173
+
174
+ Logger.warning(f"keras_quantization_aware_training_init_experimental is experimental and is subject to future changes."
175
+ f"If you encounter an issue, please open an issue in our GitHub "
176
+ f"project https://github.com/sony/model_optimization")
177
+
173
178
  KerasModelValidation(model=in_model,
174
179
  fw_info=fw_info).validate()
175
180
 
@@ -207,7 +212,7 @@ if FOUND_TF:
207
212
  return qat_model, user_info, {}
208
213
 
209
214
 
210
- def keras_quantization_aware_training_finalize(in_model: Model) -> Model:
215
+ def keras_quantization_aware_training_finalize_experimental(in_model: Model) -> Model:
211
216
  """
212
217
  Convert a model fine-tuned by the user (Trainable quantizers) to a model with Inferable quantizers.
213
218
 
@@ -252,14 +257,19 @@ if FOUND_TF:
252
257
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
253
258
  quantized model:
254
259
 
255
- >>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init(model, repr_datagen, kpi, core_config=config)
260
+ >>> quantized_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init_experimental(model, repr_datagen, kpi, core_config=config)
256
261
 
257
262
  Use the quantized model for fine-tuning. For loading the model from file, use the custom_objects dictionary:
258
263
 
259
264
  >>> quantized_model = tf.keras.models.load_model(model_file, custom_objects=custom_objects)
260
- >>> quantized_model = mct.qat.keras_quantization_aware_training_finalize(quantized_model)
265
+ >>> quantized_model = mct.qat.keras_quantization_aware_training_finalize_experimental(quantized_model)
261
266
 
262
267
  """
268
+ Logger.warning(
269
+ f"keras_quantization_aware_training_finalize_experimental is experimental and is subject to future changes."
270
+ f"If you encounter an issue, please open an issue in our GitHub "
271
+ f"project https://github.com/sony/model_optimization")
272
+
263
273
  def _export(layer):
264
274
  if isinstance(layer, KerasTrainableQuantizationWrapper):
265
275
  layer = layer.convert_to_inferable_quantizers()
@@ -282,13 +292,13 @@ if FOUND_TF:
282
292
  else:
283
293
  # If tensorflow is not installed,
284
294
  # we raise an exception when trying to use these functions.
285
- def keras_quantization_aware_training_init(*args, **kwargs):
295
+ def keras_quantization_aware_training_init_experimental(*args, **kwargs):
286
296
  Logger.critical('Installing tensorflow is mandatory '
287
- 'when using keras_quantization_aware_training_init. '
297
+ 'when using keras_quantization_aware_training_init_experimental. '
288
298
  'Could not find Tensorflow package.') # pragma: no cover
289
299
 
290
300
 
291
- def keras_quantization_aware_training_finalize(*args, **kwargs):
301
+ def keras_quantization_aware_training_finalize_experimental(*args, **kwargs):
292
302
  Logger.critical('Installing tensorflow is mandatory '
293
- 'when using keras_quantization_aware_training_finalize. '
303
+ 'when using keras_quantization_aware_training_finalize_experimental. '
294
304
  'Could not find Tensorflow package.') # pragma: no cover
@@ -73,13 +73,13 @@ if FOUND_TORCH:
73
73
  return module
74
74
 
75
75
 
76
- def pytorch_quantization_aware_training_init(in_model: Module,
77
- representative_data_gen: Callable,
78
- target_kpi: KPI = None,
79
- core_config: CoreConfig = CoreConfig(),
80
- qat_config: QATConfig = QATConfig(),
81
- fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
82
- target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
76
+ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
77
+ representative_data_gen: Callable,
78
+ target_kpi: KPI = None,
79
+ core_config: CoreConfig = CoreConfig(),
80
+ qat_config: QATConfig = QATConfig(),
81
+ fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
82
+ target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
83
83
  """
84
84
  Prepare a trained Pytorch model for quantization aware training. First the model quantization is optimized
85
85
  with post-training quantization, then the model layers are wrapped with QuantizeWrappers. The model is
@@ -136,11 +136,15 @@ if FOUND_TORCH:
136
136
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
137
137
  quantized model. Now the model contains quantizer wrappers for fine tunning the weights:
138
138
 
139
- >>> quantized_model, quantization_info = pytorch_quantization_aware_training_init(model, repr_datagen, core_config=config)
139
+ >>> quantized_model, quantization_info = mct.qat.pytorch_quantization_aware_training_init_experimental(model, repr_datagen, core_config=config)
140
140
 
141
141
  For more configuration options, please take a look at our `API documentation <https://sony.github.io/model_optimization/api/api_docs/modules/mixed_precision_quantization_config.html>`_.
142
142
 
143
143
  """
144
+ Logger.warning(
145
+ f"pytorch_quantization_aware_training_init_experimental is experimental and is subject to future changes."
146
+ f"If you encounter an issue, please open an issue in our GitHub "
147
+ f"project https://github.com/sony/model_optimization")
144
148
 
145
149
  if core_config.mixed_precision_enable:
146
150
  if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
@@ -180,7 +184,7 @@ if FOUND_TORCH:
180
184
  return qat_model, user_info
181
185
 
182
186
 
183
- def pytorch_quantization_aware_training_finalize(in_model: Module):
187
+ def pytorch_quantization_aware_training_finalize_experimental(in_model: Module):
184
188
  """
185
189
  Convert a model fine-tuned by the user to a network with QuantizeWrappers containing
186
190
  InferableQuantizers, that quantizes both the layers weights and outputs
@@ -214,13 +218,18 @@ if FOUND_TORCH:
214
218
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
215
219
  quantized model:
216
220
 
217
- >>> quantized_model, quantization_info = pytorch_quantization_aware_training_init(model, repr_datagen, core_config=config)
221
+ >>> quantized_model, quantization_info = mct.qat.pytorch_quantization_aware_training_init_experimental(model, repr_datagen, core_config=config)
218
222
 
219
223
  Use the quantized model for fine-tuning. Finally, remove the quantizer wrappers and keep a quantize model ready for inference.
220
224
 
221
- >>> quantized_model = mct.pytorch_quantization_aware_training_finalize(quantized_model)
225
+ >>> quantized_model = mct.qat.pytorch_quantization_aware_training_finalize_experimental(quantized_model)
222
226
 
223
227
  """
228
+ Logger.warning(
229
+ f"pytorch_quantization_aware_training_finalize_experimental is experimental and is subject to future changes."
230
+ f"If you encounter an issue, please open an issue in our GitHub "
231
+ f"project https://github.com/sony/model_optimization")
232
+
224
233
  for _, layer in in_model.named_children():
225
234
  if isinstance(layer, (PytorchQuantizationWrapper, PytorchActivationQuantizationHolder)):
226
235
  layer.convert_to_inferable_quantizers()
@@ -231,13 +240,13 @@ if FOUND_TORCH:
231
240
  else:
232
241
  # If torch is not installed,
233
242
  # we raise an exception when trying to use these functions.
234
- def pytorch_quantization_aware_training_init(*args, **kwargs):
243
+ def pytorch_quantization_aware_training_init_experimental(*args, **kwargs):
235
244
  Logger.critical('Installing Pytorch is mandatory '
236
- 'when using pytorch_quantization_aware_training_init. '
245
+ 'when using pytorch_quantization_aware_training_init_experimental. '
237
246
  'Could not find the torch package.') # pragma: no cover
238
247
 
239
248
 
240
- def pytorch_quantization_aware_training_finalize(*args, **kwargs):
249
+ def pytorch_quantization_aware_training_finalize_experimental(*args, **kwargs):
241
250
  Logger.critical('Installing Pytorch is mandatory '
242
- 'when using pytorch_quantization_aware_training_finalize. '
251
+ 'when using pytorch_quantization_aware_training_finalize_experimental. '
243
252
  'Could not find the torch package.') # pragma: no cover
@@ -1,6 +1,6 @@
1
1
  [metadata]
2
2
  description-file = README.md
3
- version = 1.11.0.20240307-000318
3
+ version = 1.11.0.20240309-000349
4
4
 
5
5
  [egg_info]
6
6
  tag_build =