mct-nightly 2.2.0.20241026.508__tar.gz → 2.2.0.20241027.532__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
Files changed (584)
  1. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/PKG-INFO +1 -1
  2. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/mct_nightly.egg-info/PKG-INFO +1 -1
  3. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/mct_nightly.egg-info/SOURCES.txt +9 -4
  4. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/__init__.py +1 -1
  5. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/gptq_constants.py +8 -1
  6. {mct-nightly-2.2.0.20241026.508/model_compression_toolkit/gptq/pytorch/quantizer → mct-nightly-2.2.0.20241027.532/model_compression_toolkit/gptq/common}/gradual_activation_quantization.py +10 -10
  7. {mct-nightly-2.2.0.20241026.508/model_compression_toolkit/gptq/pytorch/quantizer → mct-nightly-2.2.0.20241027.532/model_compression_toolkit/gptq/common}/regularization_factory.py +25 -11
  8. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/gptq_training.py +26 -11
  9. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantization_facade.py +35 -24
  10. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +10 -9
  11. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +5 -45
  12. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/gptq_training.py +13 -9
  13. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +3 -13
  14. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +2 -4
  15. mct-nightly-2.2.0.20241027.532/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +68 -0
  16. mct-nightly-2.2.0.20241027.532/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +32 -0
  17. mct-nightly-2.2.0.20241027.532/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +31 -0
  18. mct-nightly-2.2.0.20241027.532/tests_pytest/keras/gptq/__init__.py +14 -0
  19. mct-nightly-2.2.0.20241027.532/tests_pytest/keras/gptq/test_gradual_act_quantization.py +102 -0
  20. mct-nightly-2.2.0.20241027.532/tests_pytest/keras/trainable_infrastructure/__init__.py +16 -0
  21. mct-nightly-2.2.0.20241027.532/tests_pytest/keras/trainable_infrastructure/test_linear_annealing.py +49 -0
  22. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/gptq/test_gradual_act_quantization.py +4 -4
  23. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py +4 -4
  24. mct-nightly-2.2.0.20241026.508/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -44
  25. mct-nightly-2.2.0.20241026.508/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -39
  26. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/LICENSE.md +0 -0
  27. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/README.md +0 -0
  28. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/mct_nightly.egg-info/dependency_links.txt +0 -0
  29. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/mct_nightly.egg-info/requires.txt +0 -0
  30. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/mct_nightly.egg-info/top_level.txt +0 -0
  31. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/constants.py +0 -0
  32. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/__init__.py +0 -0
  33. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/analyzer.py +0 -0
  34. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/__init__.py +0 -0
  35. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  36. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  37. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  38. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  39. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  40. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  41. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  42. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  43. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  44. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  45. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/framework_info.py +0 -0
  46. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  47. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
  48. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  49. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  50. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  51. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  52. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  53. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  54. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  55. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  56. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  57. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  58. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  59. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  60. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  61. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  62. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  63. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  64. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  65. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  66. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  67. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
  68. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
  69. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  70. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  71. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  72. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  73. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  74. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  75. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  76. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  77. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  78. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  79. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  80. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  81. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  82. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
  83. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  84. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  85. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  86. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  87. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  88. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
  89. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
  90. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
  91. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
  92. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  93. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  94. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  95. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  96. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  97. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  98. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/model_collector.py +0 -0
  99. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/model_validation.py +0 -0
  100. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  101. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  102. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  103. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  104. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  105. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  106. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  107. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  108. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  109. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  110. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  111. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  112. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  113. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  114. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  115. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  116. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  117. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  118. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  119. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  120. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  121. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  122. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  123. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
  124. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  125. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  126. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  127. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  128. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  129. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  130. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  131. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  132. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  133. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  134. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  135. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  136. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  137. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  138. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  139. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  140. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  141. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  142. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  143. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  144. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  145. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  146. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  147. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  148. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  149. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  150. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  151. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  152. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  153. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  154. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  155. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  156. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  157. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  158. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  159. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  160. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  161. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  162. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  163. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
  164. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  165. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  166. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  167. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  168. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  169. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  170. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/user_info.py +0 -0
  171. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  172. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  173. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  174. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  175. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  176. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/__init__.py +0 -0
  177. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  178. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  179. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  180. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  181. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  182. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  183. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  184. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/constants.py +0 -0
  185. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  186. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/data_util.py +0 -0
  187. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  188. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  189. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  190. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  191. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  192. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  193. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  194. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  195. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
  196. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  197. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  198. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  199. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  200. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  201. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  202. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  203. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  204. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  205. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  206. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  207. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
  208. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  209. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  210. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  211. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  212. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
  213. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
  214. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
  215. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  216. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  217. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  218. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  219. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  220. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  221. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  222. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  223. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  224. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  225. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  226. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  227. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  228. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  229. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  230. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  231. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  232. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  233. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  234. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  235. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  236. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  237. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  238. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  239. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  240. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  241. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  242. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  243. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  244. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  245. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  246. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  247. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  248. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  249. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  250. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  251. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  252. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  253. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
  254. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  255. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  256. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  257. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  258. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  259. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  260. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  261. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  262. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  263. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  264. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  265. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  266. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  267. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  268. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  269. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  270. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  271. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
  272. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  273. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  274. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
  275. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  276. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  277. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  278. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
  279. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
  280. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
  281. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  282. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  283. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  284. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  285. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  286. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  287. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  288. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  289. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  290. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  291. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  292. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  293. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  294. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  295. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  296. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  297. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  298. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  299. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  300. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  301. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/core/runner.py +0 -0
  302. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/__init__.py +0 -0
  303. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  304. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  305. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  306. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  307. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  308. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  309. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  310. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  311. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  312. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  313. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
  314. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  315. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  316. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  317. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  318. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  319. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  320. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  321. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
  322. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  323. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  324. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  325. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  326. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  327. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
  328. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  329. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  330. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  331. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  332. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  333. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  334. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
  335. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  336. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  337. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  338. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  339. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/defaultdict.py +0 -0
  340. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/__init__.py +0 -0
  341. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  342. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  343. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  344. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  345. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  346. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  347. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  348. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  349. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  350. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  351. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  352. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  353. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  354. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  355. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  356. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  357. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  358. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  359. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  360. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  361. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  362. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  363. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  364. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  365. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  366. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  367. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  368. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  369. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  370. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  371. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  372. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/__init__.py +0 -0
  373. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  374. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
  375. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  376. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  377. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  378. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  379. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  380. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  381. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  382. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  383. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  384. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  385. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  386. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  387. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  388. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  389. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  390. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  391. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  392. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  393. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  394. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  395. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  396. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  397. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  398. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  399. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  400. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  401. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  402. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  403. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  404. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/gptq/runner.py +0 -0
  405. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/logger.py +0 -0
  406. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/metadata.py +0 -0
  407. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/pruning/__init__.py +0 -0
  408. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  409. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  410. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  411. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  412. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/__init__.py +0 -0
  413. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  414. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  415. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  416. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  417. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/ptq/runner.py +0 -0
  418. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/__init__.py +0 -0
  419. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/common/__init__.py +0 -0
  420. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  421. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  422. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  423. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  424. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
  425. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  426. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  427. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  428. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  429. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  430. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  431. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  432. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  433. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  434. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  435. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
  436. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  437. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  438. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  439. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  440. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  441. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  442. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  443. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  444. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  445. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  446. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  447. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  448. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  449. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  450. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  451. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  452. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  453. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  454. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  455. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  456. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  457. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  458. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  459. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  460. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  461. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  462. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  463. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  464. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  465. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  466. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  467. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  468. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  469. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  470. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  471. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  472. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  473. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  474. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  475. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  476. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  477. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
  478. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
  479. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
  480. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
  481. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
  482. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
  483. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
  484. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
  485. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
  486. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
  487. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
  488. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
  489. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
  490. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
  491. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
  492. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
  493. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
  494. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
  495. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
  496. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
  497. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  498. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  499. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  500. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  501. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  502. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  503. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  504. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  505. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  506. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  507. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  508. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  509. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  510. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  511. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  512. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  513. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  514. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  515. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  516. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  517. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  518. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  519. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
  520. {mct-nightly-2.2.0.20241026.508/model_compression_toolkit/trainable_infrastructure/pytorch → mct-nightly-2.2.0.20241027.532/model_compression_toolkit/trainable_infrastructure/common}/util.py +0 -0
  521. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  522. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
  523. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
  524. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
  525. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  526. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
  527. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
  528. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
  529. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
  530. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  531. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  532. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  533. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  534. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  535. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  536. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
  537. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
  538. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
  539. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  540. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
  541. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
  542. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
  543. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
  544. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  545. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
  546. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/verify_packages.py +0 -0
  547. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/__init__.py +0 -0
  548. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/__init__.py +0 -0
  549. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/constants.py +0 -0
  550. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
  551. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
  552. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
  553. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
  554. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
  555. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
  556. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
  557. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
  558. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
  559. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
  560. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
  561. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
  562. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
  563. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
  564. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
  565. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
  566. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
  567. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
  568. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
  569. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
  570. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
  571. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
  572. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
  573. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/setup.cfg +0 -0
  574. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/setup.py +0 -0
  575. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/__init__.py +0 -0
  576. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/keras/__init__.py +0 -0
  577. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/keras/core/__init__.py +0 -0
  578. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/keras/core/test_data_util.py +0 -0
  579. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/__init__.py +0 -0
  580. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/core/__init__.py +0 -0
  581. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/core/test_data_util.py +0 -0
  582. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/gptq/__init__.py +0 -0
  583. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/gptq/test_annealing_cfg.py +0 -0
  584. {mct-nightly-2.2.0.20241026.508 → mct-nightly-2.2.0.20241027.532}/tests_pytest/pytorch/trainable_infrastructure/__init__.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.20241026.508
+Version: 2.2.0.20241027.532
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.20241026.508
+Version: 2.2.0.20241027.532
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -360,6 +360,8 @@ model_compression_toolkit/gptq/common/gptq_constants.py
 model_compression_toolkit/gptq/common/gptq_framework_implementation.py
 model_compression_toolkit/gptq/common/gptq_graph.py
 model_compression_toolkit/gptq/common/gptq_training.py
+model_compression_toolkit/gptq/common/gradual_activation_quantization.py
+model_compression_toolkit/gptq/common/regularization_factory.py
 model_compression_toolkit/gptq/keras/__init__.py
 model_compression_toolkit/gptq/keras/gptq_keras_implementation.py
 model_compression_toolkit/gptq/keras/gptq_loss.py
@@ -370,7 +372,6 @@ model_compression_toolkit/gptq/keras/quantizer/__init__.py
 model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py
 model_compression_toolkit/gptq/keras/quantizer/quant_utils.py
 model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py
-model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py
 model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py
@@ -385,10 +386,8 @@ model_compression_toolkit/gptq/pytorch/graph_info.py
 model_compression_toolkit/gptq/pytorch/quantization_facade.py
 model_compression_toolkit/gptq/pytorch/quantizer/__init__.py
 model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
-model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py
 model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py
 model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py
-model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py
 model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py
 model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py
 model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py
@@ -502,6 +501,7 @@ model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/
 model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py
 model_compression_toolkit/trainable_infrastructure/__init__.py
 model_compression_toolkit/trainable_infrastructure/common/__init__.py
+model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py
 model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py
 model_compression_toolkit/trainable_infrastructure/common/constants.py
 model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py
@@ -509,7 +509,9 @@ model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
 model_compression_toolkit/trainable_infrastructure/common/quant_utils.py
 model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
 model_compression_toolkit/trainable_infrastructure/common/training_method.py
+model_compression_toolkit/trainable_infrastructure/common/util.py
 model_compression_toolkit/trainable_infrastructure/keras/__init__.py
+model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py
 model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py
 model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
 model_compression_toolkit/trainable_infrastructure/keras/load_model.py
@@ -527,7 +529,6 @@ model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py
 model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
 model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py
-model_compression_toolkit/trainable_infrastructure/pytorch/util.py
 model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py
 model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py
 model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py
@@ -566,6 +567,10 @@ tests_pytest/__init__.py
 tests_pytest/keras/__init__.py
 tests_pytest/keras/core/__init__.py
 tests_pytest/keras/core/test_data_util.py
+tests_pytest/keras/gptq/__init__.py
+tests_pytest/keras/gptq/test_gradual_act_quantization.py
+tests_pytest/keras/trainable_infrastructure/__init__.py
+tests_pytest/keras/trainable_infrastructure/test_linear_annealing.py
 tests_pytest/pytorch/__init__.py
 tests_pytest/pytorch/core/__init__.py
 tests_pytest/pytorch/core/test_data_util.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.20241026.000508"
+__version__ = "2.2.0.20241027.000532"
@@ -22,4 +22,11 @@ SOFT_ROUNDING_ZETA = 1.1
 
 # GPTQ config constant
 QUANT_PARAM_LEARNING_STR = 'quantization_parameter_learning'
-MAX_LSB_STR = 'max_lsbs_change_map'
+MAX_LSB_STR = 'max_lsbs_change_map'
+
+# GPTQ learning hyperparameters
+LR_DEFAULT = 3e-2
+LR_REST_DEFAULT = 1e-4
+LR_BIAS_DEFAULT = 1e-3
+LR_QUANTIZATION_PARAM_DEFAULT = 1e-3
+GPTQ_MOMENTUM = 0.9
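
These defaults replace the module-level learning-rate constants previously defined in the Keras quantization facade (see the facade hunk further below). A minimal sketch of how they are consumed when building the default GPTQ optimizers, assuming TensorFlow is installed; the variable names here are illustrative:

    import tensorflow as tf

    from model_compression_toolkit.gptq.common.gptq_constants import (
        LR_DEFAULT, LR_REST_DEFAULT, LR_BIAS_DEFAULT, GPTQ_MOMENTUM)

    # Default optimizers built from the shared constants, mirroring get_keras_gptq_config below.
    optimizer = tf.keras.optimizers.Adam(learning_rate=LR_DEFAULT)
    optimizer_rest = tf.keras.optimizers.Adam(learning_rate=LR_REST_DEFAULT)
    bias_optimizer = tf.keras.optimizers.SGD(learning_rate=LR_BIAS_DEFAULT, momentum=GPTQ_MOMENTUM)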
@@ -13,23 +13,23 @@
 # limitations under the License.
 # ==============================================================================
 from functools import partial
-from typing import Callable
+from typing import Callable, Any
 
 from model_compression_toolkit.gptq import GradientPTQConfig, QFractionLinearAnnealingConfig
-from model_compression_toolkit.trainable_infrastructure import BasePytorchTrainableQuantizer
-
-from model_compression_toolkit.trainable_infrastructure.pytorch.annealing_schedulers import LinearAnnealingScheduler
+from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import BaseTrainableQuantizer
 
 
 def get_gradual_activation_quantizer_wrapper_factory(gptq_config: GradientPTQConfig,
-                                                     get_total_grad_steps_fn: Callable[[], int]) \
-        -> Callable[[BasePytorchTrainableQuantizer], 'GradualActivationQuantizerWrapper']:
+                                                     get_total_grad_steps_fn: Callable[[], int],
+                                                     fw_linear_annealing_scheduler: type) \
+        -> Callable[[Any], 'GradualActivationQuantizerWrapper']:
     """
     Get a factory for 'GradualActivationQuantizerWrapper'.
 
     Args:
         gptq_config: GPTQ configuration.
         get_total_grad_steps_fn: a callable to obtain the total expected number of gradient steps.
+        fw_linear_annealing_scheduler: LinearAnnealingScheduler implementation of the framework (tf/pytorch).
 
     Returns:
         A factory function to build 'GradualActivationQuantizerWrapper' from Quantizer.
@@ -40,9 +40,9 @@ def get_gradual_activation_quantizer_wrapper_factory(gptq_config: GradientPTQCon
     annealing_cfg = gptq_config.gradual_activation_quantization_config.q_fraction_scheduler_policy
     if isinstance(annealing_cfg, QFractionLinearAnnealingConfig):
         t_end = annealing_cfg.end_step or get_total_grad_steps_fn()
-        factor_scheduler = LinearAnnealingScheduler(t_start=annealing_cfg.start_step, t_end=t_end,
-                                                    initial_val=annealing_cfg.initial_q_fraction,
-                                                    target_val=annealing_cfg.target_q_fraction)
+        factor_scheduler = fw_linear_annealing_scheduler(t_start=annealing_cfg.start_step, t_end=t_end,
+                                                         initial_val=annealing_cfg.initial_q_fraction,
+                                                         target_val=annealing_cfg.target_q_fraction)
     else:
         raise ValueError(f'Unknown annealing policy {annealing_cfg}')
 
@@ -64,7 +64,7 @@ class GradualActivationQuantizerWrapper:
         quantizer: quantizer to wrap.
         q_fraction_scheduler: a callable that accepts a gradient step and returns the corresponding quantized fraction.
     """
-    def __init__(self, quantizer: BasePytorchTrainableQuantizer, q_fraction_scheduler: Callable[[int], float]):
+    def __init__(self, quantizer: BaseTrainableQuantizer, q_fraction_scheduler: Callable[[int], float]):
        self.quantizer = quantizer
        self.q_fraction_scheduler = q_fraction_scheduler
        self.step_cnt = 0
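
The factory above now receives the framework's LinearAnnealingScheduler class (Keras or PyTorch) instead of importing the PyTorch one directly. A hypothetical standalone sketch of the linear annealing rule such a scheduler is expected to apply, using illustrative names only:

    def linear_annealing(t: int, t_start: int, t_end: int,
                         initial_val: float, target_val: float) -> float:
        # Hold initial_val before t_start, target_val after t_end,
        # and interpolate linearly in between.
        if t <= t_start:
            return initial_val
        if t >= t_end:
            return target_val
        frac = (t - t_start) / (t_end - t_start)
        return initial_val + frac * (target_val - initial_val)

    # Example: annealing the quantized fraction from 0.0 to 1.0 over 1000 steps:
    # linear_annealing(500, 0, 1000, 0.0, 1.0) == 0.5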
@@ -12,17 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-from typing import Callable
 
-from model_compression_toolkit.gptq import RoundingType, GradientPTQConfig
-from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.soft_quantizer_reg import \
-    SoftQuantizerRegularization
-from model_compression_toolkit.trainable_infrastructure.pytorch.annealing_schedulers import LinearAnnealingScheduler
+from tqdm import tqdm
+from typing import Callable, Type
 
+from model_compression_toolkit.gptq import RoundingType, GradientPTQConfig
 
+# Common warmup fraction
 WARMUP_STEP_FRACTION = 0.2
 
-def get_regularization(gptq_config: GradientPTQConfig, get_total_grad_steps_fn: Callable[[], int]) -> Callable:
+
+def get_regularization(gptq_config: GradientPTQConfig,
+                       get_total_grad_steps_fn: Callable[[], int],
+                       SoftQuantizerRegularizationFWClass: Type,
+                       LinearAnnealingSchedulerFWClass: Type) -> Callable:
     """
     Returns a function that computes the regularization term for GPTQ training based on the given
     rounding type in the GPTQ configuration.
@@ -30,15 +33,26 @@ def get_regularization(gptq_config: GradientPTQConfig, get_total_grad_steps_fn:
     Args:
         gptq_config: A GPTQ configuration.
         get_total_grad_steps_fn: a callable to obtain the total expected number of gradient steps.
+        SoftQuantizerRegularizationFWClass: The class to use for soft quantizer regularization (framework-specific).
+        LinearAnnealingSchedulerFWClass: The class to use for the annealing scheduler (framework-specific).
 
-    Returns: A function for computing the regularization. If there is no regularization function defined for the given
-        rounding type, then it returns a function that just returns 0.
-
+    Returns:
+        Callable: A function for computing the regularization. If there is no regularization function
+        defined for the given rounding type, then it returns a function that just returns 0.
     """
     if gptq_config.rounding_type == RoundingType.SoftQuantizer:
         total_gradient_steps = get_total_grad_steps_fn()
         t_start = int(WARMUP_STEP_FRACTION * total_gradient_steps)
-        scheduler = LinearAnnealingScheduler(t_start=t_start, t_end=total_gradient_steps, initial_val=20, target_val=2)
-        return SoftQuantizerRegularization(scheduler)
+
+        # Directly initializing the scheduler within the method
+        scheduler = LinearAnnealingSchedulerFWClass(
+            t_start=t_start,
+            t_end=total_gradient_steps,
+            initial_val=20,
+            target_val=2
+        )
+
+        # Return the framework-specific soft quantizer regularization
+        return SoftQuantizerRegularizationFWClass(scheduler)
     else:
         return lambda *args, **kwargs: 0
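
A usage sketch of the refactored factory, mirroring the call added to the Keras trainer below (gptq_config and _get_total_grad_steps are assumed to be provided by the surrounding trainer code):

    from model_compression_toolkit.gptq.common.regularization_factory import get_regularization
    from model_compression_toolkit.gptq.keras.quantizer.soft_rounding.soft_quantizer_reg import \
        SoftQuantizerRegularization
    from model_compression_toolkit.trainable_infrastructure.keras.annealing_schedulers import \
        KerasLinearAnnealingScheduler

    # Framework-specific classes are now injected by the caller instead of being imported here.
    reg_func = get_regularization(gptq_config,
                                  _get_total_grad_steps,
                                  SoftQuantizerRegularization,
                                  KerasLinearAnnealingScheduler)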
@@ -26,9 +26,14 @@ from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
 from model_compression_toolkit.core.keras.data_util import data_gen_to_dataloader
 from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
+from model_compression_toolkit.gptq.common.gradual_activation_quantization import \
+    get_gradual_activation_quantizer_wrapper_factory
+from model_compression_toolkit.gptq.common.regularization_factory import get_regularization
 from model_compression_toolkit.gptq.keras.quantizer.quantization_builder import quantization_builder
 from model_compression_toolkit.logger import Logger
 from mct_quantizers import KerasActivationQuantizationHolder
+from model_compression_toolkit.trainable_infrastructure.common.util import get_total_grad_steps
+from model_compression_toolkit.trainable_infrastructure.keras.annealing_schedulers import KerasLinearAnnealingScheduler
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.engine.base_layer import TensorFlowOpLayer
@@ -41,13 +46,12 @@ from model_compression_toolkit.gptq.common.gptq_training import GPTQTrainer
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
 from model_compression_toolkit.core.common import Graph
 from model_compression_toolkit.gptq.keras.graph_info import get_weights_for_loss, get_gptq_trainable_parameters
-from model_compression_toolkit.gptq.keras.quantizer.regularization_factory import get_regularization
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
 import numpy as np
 import copy
 from model_compression_toolkit.core.keras.constants import BIAS, USE_BIAS
-
+from model_compression_toolkit.gptq.keras.quantizer.soft_rounding.soft_quantizer_reg import SoftQuantizerRegularization
 
 class KerasGPTQTrainer(GPTQTrainer):
     """
@@ -78,6 +82,15 @@ class KerasGPTQTrainer(GPTQTrainer):
             hessian_info_service: HessianScoresService for fetching and computing Hessian's approximation scores.
 
         """
+
+        def _get_total_grad_steps():
+            return get_total_grad_steps(representative_data_gen) * gptq_config.n_epochs
+
+        # This must be set before the model building (as it is required for activation holder construction),
+        # which occurs in the base constructor.
+        self.gradual_act_quantizer_wrapper_factory = get_gradual_activation_quantizer_wrapper_factory(
+            gptq_config, _get_total_grad_steps, KerasLinearAnnealingScheduler)
+
         super().__init__(graph_float,
                          graph_quant,
                          gptq_config,
@@ -119,7 +132,10 @@
 
         self.weights_for_average_loss = self._get_compare_points_loss_weights()
 
-        self.reg_func = get_regularization(self.gptq_config, representative_data_gen)
+        self.reg_func = get_regularization(self.gptq_config,
+                                           _get_total_grad_steps,
+                                           SoftQuantizerRegularization,
+                                           KerasLinearAnnealingScheduler)
 
     def _get_compare_points_loss_weights(self):
         """ Get compare points weights for the distillation loss. """
@@ -185,14 +201,13 @@ class KerasGPTQTrainer(GPTQTrainer):
         _, activation_quantizers = quantization_builder(n, self.gptq_config)  # TODO: split quantizers building into two functions: for weights and activations
 
         # Holder by definition uses a single quantizer for the activation quantization
-        # thus we make sure this is the only possible case (unless it's a node with no activation
-        # quantization, which in this case has an empty list).
-        if len(activation_quantizers) == 1:
-            return KerasActivationQuantizationHolder(activation_quantizers[0])
-
-        Logger.critical(f"'KerasActivationQuantizationHolder' is designed to support a single quantizer, "
-                        f"but {len(activation_quantizers)} quantizers were found for node '{n}'. "
-                        f"Ensure only one quantizer is configured for each node's activation.")
+        # thus we make sure this is the only possible case.
+        if len(activation_quantizers) != 1:
+            Logger.critical(f"'KerasActivationQuantizationHolder' is designed to support a single quantizer, "
+                            f"but {len(activation_quantizers)} quantizers were found for node '{n}'. "
+                            f"Ensure only one quantizer is configured for each node's activation.")
+        quantizer = self.gradual_act_quantizer_wrapper_factory(activation_quantizers[0])
+        return KerasActivationQuantizationHolder(quantizer)
 
     def build_gptq_model(self) -> Tuple[Model, UserInformation]:
         """
@@ -14,17 +14,18 @@
14
14
  # ==============================================================================
15
15
  import copy
16
16
 
17
- from typing import Callable, Tuple
17
+ from typing import Callable, Tuple, Union
18
18
  from packaging import version
19
19
 
20
- from model_compression_toolkit.core.common.quantization.quantize_graph_weights import quantize_graph_weights
21
20
  from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
22
- from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
21
+ from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT, LR_DEFAULT, LR_REST_DEFAULT, \
22
+ LR_BIAS_DEFAULT, GPTQ_MOMENTUM
23
23
  from model_compression_toolkit.logger import Logger
24
24
  from model_compression_toolkit.constants import TENSORFLOW, ACT_HESSIAN_DEFAULT_BATCH_SIZE
25
25
  from model_compression_toolkit.verify_packages import FOUND_TF
26
26
  from model_compression_toolkit.core.common.user_info import UserInformation
27
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GPTQHessianScoresConfig
27
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GPTQHessianScoresConfig, \
28
+ GradualActivationQuantizationConfig
28
29
  from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
29
30
  from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
30
31
  from model_compression_toolkit.core import CoreConfig
@@ -32,13 +33,8 @@ from model_compression_toolkit.core.runner import core_runner
32
33
  from model_compression_toolkit.gptq.runner import gptq_runner
33
34
  from model_compression_toolkit.core.analyzer import analyzer_model_quantization
34
35
  from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
35
- from model_compression_toolkit.metadata import get_versions_dict, create_model_metadata
36
+ from model_compression_toolkit.metadata import create_model_metadata
36
37
 
37
- LR_DEFAULT = 0.15
38
- LR_REST_DEFAULT = 1e-4
39
- LR_BIAS_DEFAULT = 1e-4
40
- LR_QUANTIZATION_PARAM_DEFAULT = 1e-3
41
- GPTQ_MOMENTUM = 0.9
42
38
 
43
39
  if FOUND_TF:
44
40
  import tensorflow as tf
@@ -54,25 +50,25 @@ if FOUND_TF:
54
50
 
55
51
  # As from TF2.9 optimizers package is changed
56
52
  if version.parse(tf.__version__) < version.parse("2.9"):
57
- from keras.optimizer_v2.optimizer_v2 import OptimizerV2
53
+ from keras.optimizer_v2.optimizer_v2 import OptimizerV2 # pragma: no cover
58
54
  elif version.parse(tf.__version__) < version.parse("2.12"):
59
- from keras.optimizers.optimizer_v2.optimizer_v2 import OptimizerV2
55
+ from keras.optimizers.optimizer_v2.optimizer_v2 import OptimizerV2 # pragma: no cover
60
56
  else:
61
57
  from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
62
58
 
63
59
  DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
64
60
 
65
-
66
61
  def get_keras_gptq_config(n_epochs: int,
67
- optimizer: OptimizerV2 = tf.keras.optimizers.Adam(learning_rate=LR_DEFAULT),
68
- optimizer_rest: OptimizerV2 = tf.keras.optimizers.Adam(learning_rate=LR_REST_DEFAULT),
62
+ optimizer: OptimizerV2 = None,
63
+ optimizer_rest: OptimizerV2 = None,
69
64
  loss: Callable = GPTQMultipleTensorsLoss(),
70
65
  log_function: Callable = None,
71
66
  use_hessian_based_weights: bool = True,
72
67
  regularization_factor: float = REG_DEFAULT,
73
- hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE) -> GradientPTQConfig:
68
+ hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE,
69
+ gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = False) -> GradientPTQConfig:
74
70
  """
75
- Create a GradientPTQConfigV2 instance for Keras models.
71
+ Create a GradientPTQConfig instance for Keras models.
76
72
 
77
73
  args:
78
74
  n_epochs (int): Number of epochs for running the representative dataset for fine-tuning.
@@ -83,9 +79,10 @@ if FOUND_TF:
83
79
  use_hessian_based_weights (bool): Whether to use Hessian-based weights for weighted average loss.
84
80
  regularization_factor (float): A floating point number that defines the regularization factor.
85
81
  hessian_batch_size (int): Batch size for Hessian computation in Hessian-based weights GPTQ.
82
+ gradual_activation_quantization (bool, GradualActivationQuantizationConfig): If False, GradualActivationQuantization is disabled. If True, GradualActivationQuantization is enabled with the default settings. GradualActivationQuantizationConfig object can be passed to use non-default settings.
86
83
 
87
84
  returns:
88
- a GradientPTQConfigV2 object to use when fine-tuning the quantized model using gptq.
85
+ a GradientPTQConfig object to use when fine-tuning the quantized model using gptq.
89
86
 
90
87
  Examples:
91
88
 
@@ -94,7 +91,7 @@ if FOUND_TF:
94
91
  >>> import model_compression_toolkit as mct
95
92
  >>> import tensorflow as tf
96
93
 
97
- Create a GradientPTQConfigV2 to run for 5 epochs:
94
+ Create a GradientPTQConfig to run for 5 epochs:
98
95
 
99
96
  >>> gptq_conf = mct.gptq.get_keras_gptq_config(n_epochs=5)
100
97
 
@@ -102,11 +99,24 @@ if FOUND_TF:

          >>> gptq_conf = mct.gptq.get_keras_gptq_config(n_epochs=3, optimizer=tf.keras.optimizers.Nadam())

-         The configuration can be passed to :func:`~model_compression_toolkit.keras_post_training_quantization` in order to quantize a keras model using gptq.
+         The configuration can be passed to :func:`~model_compression_toolkit.keras_gradient_post_training_quantization` in order to quantize a keras model using gptq.
+

          """
+         optimizer = optimizer or tf.keras.optimizers.Adam(learning_rate=LR_DEFAULT)
+         optimizer_rest = optimizer_rest or tf.keras.optimizers.Adam(learning_rate=LR_REST_DEFAULT)
+
          bias_optimizer = tf.keras.optimizers.SGD(learning_rate=LR_BIAS_DEFAULT,
                                                   momentum=GPTQ_MOMENTUM)
+
+         if isinstance(gradual_activation_quantization, bool):
+             gradual_quant_config = GradualActivationQuantizationConfig() if gradual_activation_quantization else None
+         elif isinstance(gradual_activation_quantization, GradualActivationQuantizationConfig):
+             gradual_quant_config = gradual_activation_quantization
+         else:
+             raise TypeError(f'gradual_activation_quantization argument should be bool or '
+                             f'GradualActivationQuantizationConfig, received {type(gradual_activation_quantization)}')
+
          return GradientPTQConfig(n_epochs,
                                   optimizer,
                                   optimizer_rest=optimizer_rest,
@@ -116,7 +126,8 @@ if FOUND_TF:
                                   optimizer_bias=bias_optimizer,
                                   use_hessian_based_weights=use_hessian_based_weights,
                                   regularization_factor=regularization_factor,
-                                  hessian_weights_config=GPTQHessianScoresConfig(hessian_batch_size=hessian_batch_size))
+                                  hessian_weights_config=GPTQHessianScoresConfig(hessian_batch_size=hessian_batch_size),
+                                  gradual_activation_quantization_config=gradual_quant_config)


      def keras_gradient_post_training_quantization(in_model: Model, representative_data_gen: Callable,
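Taken together, the new `gradual_activation_quantization` argument can be exercised as in the following minimal sketch, based on the signature and type dispatch shown above; exposing `GradualActivationQuantizationConfig` under `mct.gptq` alongside the other GPTQ config classes is assumed here:

>>> import model_compression_toolkit as mct
>>> # Enable gradual activation quantization with the default settings:
>>> gptq_conf = mct.gptq.get_keras_gptq_config(n_epochs=5, gradual_activation_quantization=True)
>>> # Or pass a config object to use non-default settings:
>>> gptq_conf = mct.gptq.get_keras_gptq_config(n_epochs=5,
...                                            gradual_activation_quantization=mct.gptq.GradualActivationQuantizationConfig())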
@@ -251,13 +262,13 @@ if FOUND_TF:
  else:
      # If tensorflow is not installed,
      # we raise an exception when trying to use these functions.
-     def get_keras_gptq_config(*args, **kwargs):
+     def get_keras_gptq_config(*args, **kwargs): # pragma: no cover
          Logger.critical("Tensorflow must be installed with a version of 2.15 or lower to use "
                          "get_keras_gptq_config. The 'tensorflow' package is missing or is "
                          "installed with a version higher than 2.15.") # pragma: no cover


-     def keras_gradient_post_training_quantization(*args, **kwargs):
+     def keras_gradient_post_training_quantization(*args, **kwargs): # pragma: no cover
          Logger.critical("Tensorflow must be installed with a version of 2.15 or lower to use "
                          "keras_gradient_post_training_quantization. The 'tensorflow' package is missing or is "
-                         "installed with a version higher than 2.15.") # pragma: no cover
+                         "installed with a version higher than 2.15.")
@@ -16,18 +16,18 @@ from typing import Dict, List, Tuple

  from model_compression_toolkit.gptq import GradientPTQConfig
  from model_compression_toolkit.core import common
- from model_compression_toolkit.exporter.model_wrapper.keras.builder.node_to_quantizer import \
-     get_inferable_quantizer_kwargs
  from model_compression_toolkit.gptq.keras.quantizer.base_keras_gptq_quantizer import BaseKerasGPTQTrainableQuantizer
  from mct_quantizers import QuantizationTarget
- from mct_quantizers.common.get_quantizers import get_inferable_quantizer_class
  from mct_quantizers.keras.quantizers import BaseKerasInferableQuantizer

  from model_compression_toolkit.logger import Logger
+ from model_compression_toolkit.trainable_infrastructure import TrainingMethod
  from model_compression_toolkit.trainable_infrastructure.common.get_quantizer_config import \
-     get_trainable_quantizer_weights_config
+     get_trainable_quantizer_weights_config, get_trainable_quantizer_activation_config
  from model_compression_toolkit.trainable_infrastructure.common.get_quantizers import \
      get_trainable_quantizer_class
+ from model_compression_toolkit.trainable_infrastructure.keras.activation_quantizers.base_activation_quantizer import \
+     BaseKerasActivationTrainableQuantizer


  def quantization_builder(n: common.BaseNode,
@@ -70,12 +70,13 @@ def quantization_builder(n: common.BaseNode,

          quant_method = n.final_activation_quantization_cfg.activation_quantization_method

-         quantizer_class = get_inferable_quantizer_class(quant_target=QuantizationTarget.Activation,
+         quantizer_class = get_trainable_quantizer_class(quant_target=QuantizationTarget.Activation,
+                                                         quantizer_id=TrainingMethod.STE,
                                                          quant_method=quant_method,
-                                                         quantizer_base_class=BaseKerasInferableQuantizer)
+                                                         quantizer_base_class=BaseKerasActivationTrainableQuantizer)
+         cfg = get_trainable_quantizer_activation_config(n, None)

-         kwargs = get_inferable_quantizer_kwargs(n.final_activation_quantization_cfg, QuantizationTarget.Activation)
-
-         activation_quantizers.append(quantizer_class(**kwargs))
+         # freeze_quant_params is True since in GPTQ the activation quantization parameters should not be trained.
+         activation_quantizers.append(quantizer_class(cfg, freeze_quant_params=True))

      return weights_quantizers, activation_quantizers
@@ -12,7 +12,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
- from typing import List
+ from typing import List, Callable

  import tensorflow as tf
  from keras import Model
@@ -22,61 +22,21 @@ from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribut
  from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper


- class LinearTempDecay:
-     """
-     Annealing process for the soft quantizer regularization temperature term.
-     """
-
-     def __init__(self, t_max: int, rel_start_decay: float = 0.2, start_b: int = 20, end_b: int = 2):
-         """
-         Initializes a LinearTempDecay object.
-
-         Args:
-             t_max: maximal time step.
-             rel_start_decay: Decay step size at the beginning of the process.
-             start_b: Starting value of the regularization term.
-             end_b: Target value of the regularization term.
-         """
-
-         self.t_max = t_max
-         self.start_decay = rel_start_decay * t_max
-         self.start_b = start_b
-         self.end_b = end_b
-
-     def __call__(self, t: int) -> float:
-         """
-         Cosine annealing scheduler for soft quantizer regularization temperature term.
-
-         Args:
-             t: The current time step.
-
-         Returns: Scheduled temperature.
-         """
-
-         is_before_start_decay = tf.cast(t < self.start_decay, tf.float32)
-
-         rel_t = (t - self.start_decay) / (self.t_max - self.start_decay)
-
-         return self.start_b * is_before_start_decay + \
-                (1 - is_before_start_decay) * \
-                (self.end_b + (self.start_b - self.end_b) * tf.math.maximum(0.0, (1 - rel_t)))
-

  class SoftQuantizerRegularization:
      """
      A class to handle the computation of soft quantizer regularization for GPTQ training.
      """

-     def __init__(self, total_gradient_steps: int):
+     def __init__(self, beta_scheduler: Callable[[int], float]):
          """
          Initializes the regularization computation object with a LinearDecay object.

          Args:
-             total_gradient_steps: The number of gradient steps during optimization.
+             beta_scheduler: a callable that accepts current time step and returns a corresponding beta value.
          """
          # Initializing the temperature decay according to the number of expected gradient steps
-         self.linear_decay = LinearTempDecay(total_gradient_steps)
-
+         self.beta_scheduler = beta_scheduler
          self.count_iter = tf.Variable(0.)


@@ -91,7 +51,7 @@ class SoftQuantizerRegularization:
          Returns: Regularization value.
          """
          soft_reg_aux: List[tf.Tensor] = []
-         b = self.linear_decay(self.count_iter.value())
+         b = self.beta_scheduler(self.count_iter.value())
          for layer in model.layers:
              if isinstance(layer, KerasTrainableQuantizationWrapper):
                  kernel_attribute = get_kernel_attribute_name_for_gptq(layer_type=type(layer.layer),
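With this refactor, `SoftQuantizerRegularization` receives its temperature schedule from the caller instead of building `LinearTempDecay` itself. A minimal wiring sketch that reproduces the removed `LinearTempDecay` behavior; the helper name `make_beta_scheduler` and the `t_max` value are illustrative, not part of the package:

>>> import tensorflow as tf
>>> def make_beta_scheduler(t_max, rel_start_decay=0.2, start_b=20, end_b=2):
...     # Mirrors the removed LinearTempDecay: hold start_b, then decay linearly towards end_b.
...     start_decay = rel_start_decay * t_max
...     def scheduler(t):
...         before = tf.cast(t < start_decay, tf.float32)
...         rel_t = (t - start_decay) / (t_max - start_decay)
...         return start_b * before + (1 - before) * (end_b + (start_b - end_b) * tf.math.maximum(0.0, 1 - rel_t))
...     return scheduler
>>> reg = SoftQuantizerRegularization(beta_scheduler=make_beta_scheduler(t_max=1000))  # t_max: expected number of gradient steps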
@@ -17,15 +17,18 @@ from typing import Callable, List, Tuple, Union, Generator

  import numpy as np
  import torch
- from mct_quantizers import PytorchQuantizationWrapper, PytorchActivationQuantizationHolder
  from torch.nn import Module
  from torch.utils.data import DataLoader
  from tqdm import tqdm

+ from model_compression_toolkit.gptq.common.gradual_activation_quantization import get_gradual_activation_quantizer_wrapper_factory
+ from model_compression_toolkit.gptq.common.regularization_factory import get_regularization
+
  from model_compression_toolkit.core.common import Graph, BaseNode
  from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.core.common.hessian import HessianInfoService, HessianScoresGranularity
+
  from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
  from model_compression_toolkit.core.pytorch.constants import BIAS
  from model_compression_toolkit.core.pytorch.data_util import FixedDatasetFromGenerator, IterableDatasetFromGenerator, \
@@ -34,14 +37,15 @@ from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, set_mo
  from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
  from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
  from model_compression_toolkit.gptq.common.gptq_training import GPTQTrainer
- from model_compression_toolkit.gptq.pytorch.graph_info import get_gptq_trainable_parameters, \
-     get_weights_for_loss
- from model_compression_toolkit.gptq.pytorch.quantizer.gradual_activation_quantization import \
-     get_gradual_activation_quantizer_wrapper_factory
+ from model_compression_toolkit.gptq.pytorch.graph_info import get_gptq_trainable_parameters, get_weights_for_loss
  from model_compression_toolkit.gptq.pytorch.quantizer.quantization_builder import quantization_builder
- from model_compression_toolkit.gptq.pytorch.quantizer.regularization_factory import get_regularization
+
+ from mct_quantizers import PytorchQuantizationWrapper, PytorchActivationQuantizationHolder
+ from model_compression_toolkit.trainable_infrastructure.common.util import get_total_grad_steps
+ from model_compression_toolkit.trainable_infrastructure.pytorch.annealing_schedulers import PytorchLinearAnnealingScheduler
+ from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.soft_quantizer_reg import SoftQuantizerRegularization as PytorchSoftQuantizerRegularization
+
  from model_compression_toolkit.logger import Logger
- from model_compression_toolkit.trainable_infrastructure.pytorch.util import get_total_grad_steps


  class PytorchGPTQTrainer(GPTQTrainer):
@@ -78,7 +82,7 @@ class PytorchGPTQTrainer(GPTQTrainer):

          # must be set prior to model building in the base class constructor
          self.gradual_act_quantizer_wrapper_factory = get_gradual_activation_quantizer_wrapper_factory(
-             gptq_config, _get_total_grad_steps)
+             gptq_config, _get_total_grad_steps, PytorchLinearAnnealingScheduler)

          super().__init__(graph_float,
                           graph_quant,
@@ -121,7 +125,7 @@ class PytorchGPTQTrainer(GPTQTrainer):
          else:
              self.train_dataloader = self._prepare_train_dataloader_for_non_sla(representative_data_gen)

-         self.reg_func = get_regularization(self.gptq_config, _get_total_grad_steps)
+         self.reg_func = get_regularization(self.gptq_config, _get_total_grad_steps, PytorchSoftQuantizerRegularization, PytorchLinearAnnealingScheduler)

      def _prepare_train_dataloader_sla(self, data_gen_fn: Callable[[], Generator]) -> DataLoader:
          """