mct-nightly 2.3.0.20250408.522__tar.gz → 2.3.0.20250410.526__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
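For readers who want to reproduce a file-level comparison like the listing below, here is a minimal sketch. It is not the tooling that produced this diff; it assumes both releases publish a .tar.gz sdist on pypi.org reachable through the public PyPI JSON API, and it reports added, removed, or changed archive members by SHA-256 digest (it does not compute per-file line counts).

    # Minimal sketch (assumptions noted above); compares two sdists of mct-nightly.
    import hashlib
    import io
    import json
    import tarfile
    import urllib.request

    def sdist_file_digests(package: str, version: str) -> dict:
        """Map archive-relative file paths to SHA-256 digests for one release's sdist."""
        meta = json.load(urllib.request.urlopen(f"https://pypi.org/pypi/{package}/{version}/json"))
        sdist_url = next(u["url"] for u in meta["urls"] if u["packagetype"] == "sdist")
        data = urllib.request.urlopen(sdist_url).read()
        digests = {}
        with tarfile.open(fileobj=io.BytesIO(data), mode="r:gz") as tar:
            for member in tar.getmembers():
                if member.isfile():
                    # Drop the leading "<name>-<version>/" prefix so paths align across versions.
                    rel_path = member.name.split("/", 1)[-1]
                    digests[rel_path] = hashlib.sha256(tar.extractfile(member).read()).hexdigest()
        return digests

    old = sdist_file_digests("mct-nightly", "2.3.0.20250408.522")
    new = sdist_file_digests("mct-nightly", "2.3.0.20250410.526")
    for path in sorted(old.keys() | new.keys()):
        if old.get(path) != new.get(path):
            status = "added" if path not in old else "removed" if path not in new else "changed"
            print(f"{status}: {path}")

Running this against the two versions named above should yield a file list comparable to the one that follows.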
Files changed (538)
  1. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/PKG-INFO +2 -1
  2. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/mct_nightly.egg-info/PKG-INFO +2 -1
  3. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/mct_nightly.egg-info/requires.txt +1 -0
  4. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/__init__.py +1 -1
  5. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/framework_implementation.py +11 -0
  6. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/fusion/fusing_info.py +4 -5
  7. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/base_graph.py +2 -1
  8. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/base_node.py +15 -19
  9. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +4 -2
  10. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_calculator.py +2 -2
  11. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +2 -1
  12. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/debug_config.py +2 -0
  13. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +31 -6
  14. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +5 -3
  15. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/compute_activation_bias_correction_of_graph.py +1 -2
  16. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +3 -2
  17. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +3 -2
  18. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/keras_implementation.py +14 -1
  19. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +1 -2
  20. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -1
  21. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +1 -2
  22. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +16 -3
  23. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/reader/reader.py +28 -7
  24. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/utils.py +2 -2
  25. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantization_facade.py +6 -2
  26. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +7 -2
  27. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/keras/quantization_facade.py +7 -2
  28. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +7 -2
  29. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py +2 -10
  30. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py +2 -1
  31. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/verify_packages.py +0 -1
  32. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/LICENSE.md +0 -0
  33. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/README.md +0 -0
  34. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/mct_nightly.egg-info/SOURCES.txt +0 -0
  35. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/mct_nightly.egg-info/dependency_links.txt +0 -0
  36. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/mct_nightly.egg-info/top_level.txt +0 -0
  37. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/constants.py +0 -0
  38. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/__init__.py +0 -0
  39. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/analyzer.py +0 -0
  40. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/__init__.py +0 -0
  41. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  42. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  43. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  44. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  45. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  46. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  47. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  48. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  49. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  50. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/collectors/weighted_histogram_collector.py +0 -0
  51. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/framework_info.py +0 -0
  52. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  53. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
  54. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  55. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  56. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  57. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  58. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  59. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  60. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  61. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  62. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  63. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  64. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  65. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  66. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  67. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  68. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  69. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  70. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
  71. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  72. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  73. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  74. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  75. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  76. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  77. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  78. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  79. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  80. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  81. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  82. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  83. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  84. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
  85. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  86. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_ru_helper.py +0 -0
  87. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  88. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  89. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  90. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  91. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
  92. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  93. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  94. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  95. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  96. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  97. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/model_collector.py +0 -0
  98. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/model_validation.py +0 -0
  99. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  100. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  101. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  102. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  103. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  104. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  105. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  106. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  107. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  108. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  109. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  110. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  111. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  112. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  113. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  114. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  115. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  116. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  117. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  118. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  119. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  120. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  121. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  122. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
  123. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  124. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  125. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  126. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  127. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  128. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  129. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  130. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  131. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  132. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  133. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  134. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  135. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  136. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  137. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  138. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  139. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  140. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  141. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  142. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  143. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  144. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  145. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  146. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  147. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  148. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/apply_activation_bias_correction_to_graph.py +0 -0
  149. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  150. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  151. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  152. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  153. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  154. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  155. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  156. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  157. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  158. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  159. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
  160. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  161. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  162. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  163. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  164. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  165. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/user_info.py +0 -0
  166. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  167. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  168. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  169. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  170. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  171. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/__init__.py +0 -0
  172. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  173. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  174. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  175. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  176. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  177. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  178. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  179. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/constants.py +0 -0
  180. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  181. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/data_util.py +0 -0
  182. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  183. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  184. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  185. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  186. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  187. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  188. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  189. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  190. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
  191. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  192. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  193. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  194. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  195. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  196. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  197. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  198. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  199. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  200. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  201. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  202. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
  203. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  204. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  205. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  206. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  207. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
  208. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
  209. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
  210. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  211. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  212. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  213. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  214. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  215. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  216. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  217. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  218. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  219. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  220. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  221. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  222. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  223. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  224. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  225. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  226. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  227. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  228. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  229. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  230. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  231. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  232. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/statistics_correction/keras_compute_activation_bias_correction_of_graph.py +0 -0
  233. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  234. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  235. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  236. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  237. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  238. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  239. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  240. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  241. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  242. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  243. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  244. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  245. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  246. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  247. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/data_util.py +0 -0
  248. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  249. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  250. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  251. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  252. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  253. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  254. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  255. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  256. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/convtranspose_dynamic_padding.py +0 -0
  257. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  258. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  259. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_linear.py +0 -0
  260. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  261. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/matmul_decomposition.py +0 -0
  262. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  263. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  264. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  265. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  266. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  267. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  268. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  269. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  270. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
  271. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  272. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  273. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  274. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
  275. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
  276. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
  277. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  278. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  279. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  280. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  281. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  282. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  283. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  284. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  285. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  286. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  287. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  288. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  289. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  290. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  291. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  292. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/pytorch/statistics_correction/pytorch_compute_activation_bias_correction_of_graph.py +0 -0
  293. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  294. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/core/runner.py +0 -0
  295. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/__init__.py +0 -0
  296. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  297. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  298. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  299. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  300. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  301. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  302. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  303. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  304. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  305. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  306. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
  307. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  308. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  309. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  310. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  311. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  312. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  313. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  314. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
  315. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  316. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  317. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  318. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  319. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  320. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
  321. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  322. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  323. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  324. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  325. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  326. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  327. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
  328. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  329. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  330. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  331. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  332. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/defaultdict.py +0 -0
  333. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/__init__.py +0 -0
  334. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  335. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  336. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  337. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  338. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  339. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  340. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  341. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  342. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  343. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  344. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  345. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  346. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  347. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  348. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  349. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  350. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  351. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  352. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  353. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  354. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  355. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  356. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  357. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  358. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  359. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  360. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  361. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  362. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  363. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  364. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  365. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/__init__.py +0 -0
  366. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  367. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
  368. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  369. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  370. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  371. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  372. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/gradual_activation_quantization.py +0 -0
  373. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/common/regularization_factory.py +0 -0
  374. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  375. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  376. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  377. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  378. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  379. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  380. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  381. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  382. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  383. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  384. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  385. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  386. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  387. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  388. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  389. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  390. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  391. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  392. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
  393. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  394. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  395. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  396. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  397. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  398. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  399. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  400. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  401. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  402. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  403. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  404. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/gptq/runner.py +0 -0
  405. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/logger.py +0 -0
  406. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/metadata.py +0 -0
  407. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/pruning/__init__.py +0 -0
  408. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  409. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  410. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  411. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  412. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/__init__.py +0 -0
  413. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  414. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  415. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/ptq/runner.py +0 -0
  416. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/__init__.py +0 -0
  417. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/common/__init__.py +0 -0
  418. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  419. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  420. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  421. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  422. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_weight_quantizer.py +0 -0
  423. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  424. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  425. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  426. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  427. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  428. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  429. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  430. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  431. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  432. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  433. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  434. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
  435. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  436. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  437. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  438. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  439. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  440. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  441. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  442. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  443. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  444. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  445. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/schema/__init__.py +0 -0
  446. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py +0 -0
  447. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py +0 -0
  448. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/schema/v1.py +0 -0
  449. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/schema/v2.py +0 -0
  450. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py +0 -0
  451. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py +0 -0
  452. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attribute_filter.py +0 -0
  453. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/current_tpc.py +0 -0
  454. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py +0 -0
  455. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py +0 -0
  456. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py +0 -0
  457. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py +0 -0
  458. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py +0 -0
  459. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  460. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  461. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  462. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  463. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  464. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py +0 -0
  465. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  466. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  467. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  468. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py +0 -0
  469. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  470. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  471. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  472. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py +0 -0
  473. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  474. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  475. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/annealing_schedulers.py +0 -0
  476. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  477. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  478. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  479. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  480. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  481. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  482. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
  483. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/common/util.py +0 -0
  484. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  485. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/__init__.py +0 -0
  486. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/base_activation_quantizer.py +0 -0
  487. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/__init__.py +0 -0
  488. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  489. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/uniform_lsq.py +0 -0
  490. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/__init__.py +0 -0
  491. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/symmetric_ste.py +0 -0
  492. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/ste/uniform_ste.py +0 -0
  493. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/annealing_schedulers.py +0 -0
  494. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  495. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  496. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  497. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  498. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  499. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  500. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
  501. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
  502. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
  503. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  504. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
  505. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
  506. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
  507. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
  508. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
  509. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  510. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
  511. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/__init__.py +0 -0
  512. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/__init__.py +0 -0
  513. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/constants.py +0 -0
  514. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
  515. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
  516. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
  517. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
  518. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
  519. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
  520. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
  521. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
  522. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
  523. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
  524. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
  525. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
  526. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
  527. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
  528. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
  529. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
  530. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
  531. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
  532. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
  533. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
  534. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
  535. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
  536. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
  537. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/setup.cfg +0 -0
  538. {mct_nightly-2.3.0.20250408.522 → mct_nightly-2.3.0.20250410.526}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mct-nightly
- Version: 2.3.0.20250408.522
+ Version: 2.3.0.20250410.526
  Summary: A Model Compression Toolkit for neural networks
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: Apache Software License
@@ -22,6 +22,7 @@ Requires-Dist: scipy
  Requires-Dist: protobuf
  Requires-Dist: mct-quantizers-nightly
  Requires-Dist: pydantic<2.0
+ Requires-Dist: sony-custom-layers-dev==0.4.0.dev6
  Dynamic: classifier
  Dynamic: description
  Dynamic: description-content-type
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mct-nightly
- Version: 2.3.0.20250408.522
+ Version: 2.3.0.20250410.526
  Summary: A Model Compression Toolkit for neural networks
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: Apache Software License
@@ -22,6 +22,7 @@ Requires-Dist: scipy
  Requires-Dist: protobuf
  Requires-Dist: mct-quantizers-nightly
  Requires-Dist: pydantic<2.0
+ Requires-Dist: sony-custom-layers-dev==0.4.0.dev6
  Dynamic: classifier
  Dynamic: description
  Dynamic: description-content-type
@@ -11,3 +11,4 @@ scipy
  protobuf
  mct-quantizers-nightly
  pydantic<2.0
+ sony-custom-layers-dev==0.4.0.dev6
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.3.0.20250408.000522"
+ __version__ = "2.3.0.20250410.000526"
@@ -93,6 +93,17 @@ class FrameworkImplementation(ABC):
  raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
  f'framework\'s to_tensor method.') # pragma: no cover

+ @abstractmethod
+ def is_tuple_of_tensors(self, obj: Any) -> bool:
+ """
+ Check if a given object if a tuple of tensors
+ :param obj: Object to check its type
+ :return: True if obj is a tuple of tensors, False otherwise
+ """
+ raise NotImplementedError(f'{self.__class__.__name__} has to implement the '
+ f'framework\'s is_tuple_of_tensors method.') # pragma: no cover
+
+
  @abstractmethod
  def model_reader(self,
  model: Any,
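The new abstract hook is implemented for both Keras and PyTorch later in this diff. As a hedged illustration (a sketch, not code taken from the diff), the PyTorch override is equivalent to the following compact check:

    import torch

    def is_tuple_of_tensors(obj) -> bool:
        # True only for a tuple (including named tuples) whose items are all torch tensors.
        return isinstance(obj, tuple) and all(isinstance(item, torch.Tensor) for item in obj)

    assert is_tuple_of_tensors((torch.zeros(1), torch.ones(1)))
    assert not is_tuple_of_tensors([torch.zeros(1)])  # a list is not a tuple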
@@ -150,7 +150,6 @@ class FusingInfo:
  """
  return self.fusing_data

-
  @staticmethod
  def generate_fused_op_id(nodes: List['BaseNode']) -> str:
  """
@@ -166,7 +165,7 @@ class FusingInfo:
  id = FUSED_OP_ID_PREFIX + '_'.join([node.name for node in nodes])
  return id

- def validate(self, graph) -> None:
+ def validate(self, graph: 'Graph') -> None:
  """
  Validate that the fusing information is consistent with the given graph and generation logic.

@@ -267,7 +266,7 @@ class FusingInfoGenerator:
  def __init__(self, fusing_patterns):
  self._fusing_patterns = fusing_patterns

- def generate_fusing_info(self, graph) -> FusingInfo:
+ def generate_fusing_info(self, graph: 'Graph') -> FusingInfo:
  """
  Generate fusing information based on the graph and fusing patterns.

@@ -289,7 +288,7 @@
  return FusingInfo(fusing_patterns=self._fusing_patterns)

  # Find max fusion
- max_layers_fusing = 0 if len(self._fusing_patterns) == 0 else max([len(fusing_pattern) for fusing_pattern in self._fusing_patterns])
+ max_layers_fusing = max([len(fusing_pattern) for fusing_pattern in self._fusing_patterns])

  # Travel along the graph to find layers for fusing
  nodes = graph.get_topo_sorted_nodes()
@@ -331,7 +330,7 @@ def get_valid_fusing_patterns_for_node(fusing_patterns: List[List[Any]],
  Returns only the fusing patterns where a specific layer (at index idx) matches the given node — either by type or filter params.

  Args:
- fusing_patterns: supported fusings
+ fusing_patterns: supported fusing patterns
  node: node to decide if it can be a part of fusion
  idx: index of layer in the fusion
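For context, the simplified max_layers_fusing computation above can drop its empty-list guard because the empty-pattern case already returns early with an empty FusingInfo. A minimal sketch with hypothetical patterns:

    # Hypothetical fusing patterns; each pattern is a list of layer types / filters.
    fusing_patterns = [["Conv2D", "ReLU"], ["Conv2D", "BatchNormalization", "ReLU"]]
    max_layers_fusing = max(len(pattern) for pattern in fusing_patterns)
    assert max_layers_fusing == 3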
@@ -33,6 +33,7 @@ from model_compression_toolkit.core.common.collectors.statistics_collector impor
  from model_compression_toolkit.core.common.collectors.statistics_collector import scale_statistics, shift_statistics
  from model_compression_toolkit.core.common.pruning.pruning_section import PruningSection
  from model_compression_toolkit.core.common.user_info import UserInformation
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import ActivationQuantizationMode
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
  from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
@@ -920,7 +921,7 @@ class Graph(nx.MultiDiGraph, GraphSearches):
  nodes_to_disable = [node for nodes in self.fusing_info.get_all_fused_operations().values() for node in nodes[:-1]]
  for node in nodes_to_disable:
  for qc in node.candidates_quantization_cfg:
- qc.activation_quantization_cfg.enable_activation_quantization = False
+ qc.activation_quantization_cfg.quant_mode = ActivationQuantizationMode.FLN_QUANT

  def validate(self):
  """
@@ -20,7 +20,8 @@ import numpy as np

  from model_compression_toolkit.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE, \
  ACTIVATION_N_BITS_ATTRIBUTE, FP32_BYTES_PER_PARAMETER
- from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig, \
+ ActivationQuantizationMode
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions, \
  OpQuantizationConfig
@@ -116,33 +117,28 @@ class BaseNode:
  """
  return any(isinstance(key, int) for key in self.weights.keys())

+ def _is_single_quant_mode(self, q_mode: ActivationQuantizationMode) -> bool:
+ """ Check whether all candidates have the same unique quantization mode, and if it is 'q_mode'. """
+
+ if self.final_activation_quantization_cfg:
+ # if we have a final configuration, then we only care to check if it enables activation quantization.
+ return self.final_activation_quantization_cfg.quant_mode == q_mode
+
+ q_modes = {qc.activation_quantization_cfg.quant_mode for qc in self.candidates_quantization_cfg}
+ assert len(q_modes) == 1
+ return q_modes.pop() == q_mode
+
  def is_activation_quantization_enabled(self) -> bool:
  """
-
  Returns: Whether node activation quantization is enabled or not.
-
  """
- if self.final_activation_quantization_cfg:
- # if we have a final configuration, then we only care to check if it enables activation quantization
- return self.final_activation_quantization_cfg.enable_activation_quantization
-
- for qc in self.candidates_quantization_cfg:
- assert self.candidates_quantization_cfg[0].activation_quantization_cfg.enable_activation_quantization == \
- qc.activation_quantization_cfg.enable_activation_quantization
- return self.candidates_quantization_cfg[0].activation_quantization_cfg.enable_activation_quantization
+ return self._is_single_quant_mode(ActivationQuantizationMode.QUANT)

  def is_quantization_preserving(self) -> bool:
  """
  Returns: Whether node activation quantization information is preserved from its inputs.
  """
- if self.final_activation_quantization_cfg:
- # if we have a final configuration, then we only care to check if it enables activation quantization.
- return self.final_activation_quantization_cfg.quantization_preserving
-
- for qc in self.candidates_quantization_cfg:
- assert self.candidates_quantization_cfg[0].activation_quantization_cfg.quantization_preserving == \
- qc.activation_quantization_cfg.quantization_preserving
- return self.candidates_quantization_cfg[0].activation_quantization_cfg.quantization_preserving
+ return self._is_single_quant_mode(ActivationQuantizationMode.PRESERVE_QUANT)

  def is_weights_quantization_enabled(self, attr_name: str) -> bool:
  """
@@ -72,8 +72,7 @@ class HessianScoresCalculator(ABC):
  """
  raise NotImplemented(f'{self.__class__.__name__} have to implement compute method.') # pragma: no cover

- @staticmethod
- def unfold_tensors_list(tensors_to_unfold: Any) -> List[Any]:
+ def unfold_tensors_list(self, tensors_to_unfold: Any) -> List[Any]:
  """
  Unfold (flatten) a nested tensors list.
  Given a mixed list of single tensors and nested tensor lists,
@@ -85,6 +84,9 @@ class HessianScoresCalculator(ABC):
  """
  unfold_tensors = []
  for tensor in tensors_to_unfold:
+ if self.fw_impl.is_tuple_of_tensors(tensor):
+ tensor = list(tensor) # converts named tuple to list
+
  if isinstance(tensor, List):
  unfold_tensors += tensor
  else:
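A hedged standalone sketch (not code from the diff) of what the updated flattening logic achieves: a model output returned as a (named) tuple of tensors is unpacked the same way a list would be.

    import torch

    def unfold(tensors, is_tuple_of_tensors):
        out = []
        for t in tensors:
            if is_tuple_of_tensors(t):
                t = list(t)  # e.g. a model whose forward returns a named tuple of outputs
            if isinstance(t, list):
                out += t
            else:
                out.append(t)
        return out

    is_tup = lambda o: isinstance(o, tuple) and all(isinstance(i, torch.Tensor) for i in o)
    a, b, c = torch.zeros(1), torch.ones(1), torch.ones(2)
    assert len(unfold([a, (b, c)], is_tup)) == 3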
@@ -31,7 +31,7 @@ from model_compression_toolkit.core.common.graph.virtual_activation_weights_node
  from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import \
  RUTarget, ResourceUtilization
  from model_compression_toolkit.core.common.quantization.node_quantization_config import NodeWeightsQuantizationConfig, \
- NodeActivationQuantizationConfig, BaseNodeQuantizationConfig
+ NodeActivationQuantizationConfig, BaseNodeQuantizationConfig, ActivationQuantizationMode
  from model_compression_toolkit.core.common.substitutions.virtual_activation_weights_composition import \
  get_input_activation_if_composable

@@ -710,7 +710,7 @@
  """
  if act_qc:
  assert bitwidth_mode == BitwidthMode.QCustom
- return act_qc.activation_n_bits if act_qc.enable_activation_quantization else FLOAT_BITWIDTH
+ return act_qc.activation_n_bits if act_qc.quant_mode == ActivationQuantizationMode.QUANT else FLOAT_BITWIDTH

  if bitwidth_mode == BitwidthMode.Float or not (n.is_activation_quantization_enabled() or
  n.is_quantization_preserving()):
@@ -20,6 +20,7 @@ from typing import Callable, Any, List, Tuple
  from model_compression_toolkit.constants import AXIS
  from model_compression_toolkit.core import FrameworkInfo, MixedPrecisionQuantizationConfig
  from model_compression_toolkit.core.common import Graph, BaseNode
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import ActivationQuantizationMode
  from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
  from model_compression_toolkit.core.common.similarity_analyzer import compute_kl_divergence
  from model_compression_toolkit.core.common.model_builder_mode import ModelBuilderMode
@@ -207,7 +208,7 @@ class SensitivityEvaluation:
  if self.disable_activation_for_metric:
  for n in evaluation_graph.get_topo_sorted_nodes():
  for c in n.candidates_quantization_cfg:
- c.activation_quantization_cfg.enable_activation_quantization = False
+ c.activation_quantization_cfg.quant_mode = ActivationQuantizationMode.NO_QUANT

  model_mp, _, conf_node2layers = self.fw_impl.model_builder(evaluation_graph,
  mode=ModelBuilderMode.MIXEDPRECISION,
@@ -29,8 +29,10 @@ class DebugConfig:
  enabled) or not. Can be used to pinpoint problematic layers in the quantization process.
  network_editor (List[EditRule]): A list of rules and actions to edit the network for quantization.
  simulate_scheduler (bool): Simulate scheduler behavior to compute operators' order and cuts.
+ bypass (bool): A flag to enable MCT bypass, which skips MCT runner and returns the input model unchanged.
  """

  analyze_similarity: bool = False
  network_editor: List[EditRule] = field(default_factory=list)
  simulate_scheduler: bool = False
+ bypass: bool = False
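A hedged usage sketch of the new bypass flag (assuming MCT's public CoreConfig and DebugConfig constructors; the facade hunk near the end of this diff returns the input model unchanged when the flag is set):

    import model_compression_toolkit as mct

    # With bypass=True, a quantization facade is expected to skip the MCT runner and
    # hand back the original model together with no UserInformation.
    core_config = mct.core.CoreConfig(debug_config=mct.core.DebugConfig(bypass=True))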
@@ -15,7 +15,7 @@


  from typing import Callable, Any, List, Tuple, Union, Dict, TYPE_CHECKING
-
+ from enum import Enum, auto
  import numpy as np

  from model_compression_toolkit.core.common.quantization.quantization_fn_selection import get_weights_quantization_fn
@@ -40,6 +40,14 @@ if TYPE_CHECKING:
  ##########################################


+ class ActivationQuantizationMode(Enum):
+ """ An enum defining the output activation quantization mode of a node. """
+ QUANT = auto()
+ FLN_QUANT = auto()
+ PRESERVE_QUANT = auto()
+ NO_QUANT = auto()
+
+
  class BaseNodeQuantizationConfig(object):
  """
  Base class for node quantization configuration
@@ -100,8 +108,14 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
  self.activation_n_bits = op_cfg.activation_n_bits
  self.relu_bound_to_power_of_2 = qc.relu_bound_to_power_of_2
  self.activation_bias_correction_term = None
- self.enable_activation_quantization = op_cfg.enable_activation_quantization
- self.quantization_preserving = op_cfg.quantization_preserving
+ if op_cfg.enable_activation_quantization and op_cfg.quantization_preserving:
+ raise ValueError("An OpQuantizationConfig can't have both enable_activation_quantization and quantization_preserving enabled.")
+ if op_cfg.enable_activation_quantization:
+ self.quant_mode = ActivationQuantizationMode.QUANT
+ elif op_cfg.quantization_preserving:
+ self.quant_mode = ActivationQuantizationMode.PRESERVE_QUANT
+ else:
+ self.quant_mode = ActivationQuantizationMode.NO_QUANT
  self.signedness = op_cfg.signedness
  self.activation_channel_equalization = qc.activation_channel_equalization
  self.input_scaling = qc.input_scaling
@@ -113,6 +127,17 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
  self.shift_negative_threshold_recalculation = qc.shift_negative_threshold_recalculation
  self.concat_threshold_update = qc.concat_threshold_update

+ @property
+ def enable_activation_quantization(self):
+ return self.quant_mode == ActivationQuantizationMode.QUANT
+
+ @property
+ def quantization_preserving(self):
+ return self.quant_mode == ActivationQuantizationMode.PRESERVE_QUANT
+
+ def fln_quantization(self):
+ return self.quant_mode == ActivationQuantizationMode.FLN_QUANT
+
  def quantize_node_output(self,
  tensors: Any) -> Any:
  """
@@ -181,7 +206,7 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
  activation_params: Dictionary that contains weight quantization params.

  """
- assert self.enable_activation_quantization
+ assert self.quant_mode == ActivationQuantizationMode.QUANT
  for param_name, param_value in activation_params.items():
  self.activation_quantization_params[param_name] = param_value

@@ -203,7 +228,7 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
  self.activation_error_method == other.activation_error_method and \
  self.activation_quantization_method == other.activation_quantization_method and \
  self.activation_n_bits == other.activation_n_bits and \
- self.enable_activation_quantization == other.enable_activation_quantization and \
+ self.quant_mode == other.quant_mode and \
  self.activation_channel_equalization == other.activation_channel_equalization and \
  self.input_scaling == other.input_scaling and \
  self.min_threshold == other.min_threshold and \
@@ -219,7 +244,7 @@ class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
  self.activation_error_method,
  self.activation_quantization_method,
  self.activation_n_bits,
- self.enable_activation_quantization,
+ self.quant_mode,
  self.activation_channel_equalization,
  self.input_scaling,
  self.min_threshold,
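To summarize the refactor above, here is a hedged, self-contained sketch (not code from the diff) of how the former boolean pair maps onto the new ActivationQuantizationMode, mirroring the constructor logic; FLN_QUANT is assigned separately for intermediate nodes of fused operations (see the base_graph change earlier in this diff):

    from enum import Enum, auto

    class ActivationQuantizationMode(Enum):
        QUANT = auto()
        FLN_QUANT = auto()
        PRESERVE_QUANT = auto()
        NO_QUANT = auto()

    def mode_from_flags(enable_activation_quantization: bool, quantization_preserving: bool) -> ActivationQuantizationMode:
        # Mirrors the NodeActivationQuantizationConfig constructor introduced above.
        if enable_activation_quantization and quantization_preserving:
            raise ValueError("Both flags cannot be enabled at the same time.")
        if enable_activation_quantization:
            return ActivationQuantizationMode.QUANT
        if quantization_preserving:
            return ActivationQuantizationMode.PRESERVE_QUANT
        return ActivationQuantizationMode.NO_QUANT

    assert mode_from_flags(True, False) is ActivationQuantizationMode.QUANT
    assert mode_from_flags(False, True) is ActivationQuantizationMode.PRESERVE_QUANT
    assert mode_from_flags(False, False) is ActivationQuantizationMode.NO_QUANT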
@@ -25,7 +25,8 @@ from model_compression_toolkit.core.common.framework_info import FrameworkInfo
  from model_compression_toolkit.core.common.graph.base_graph import Graph
  from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \
  CandidateNodeQuantizationConfig
- from model_compression_toolkit.core.common.quantization.node_quantization_config import NodeActivationQuantizationConfig
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import NodeActivationQuantizationConfig, \
+ ActivationQuantizationMode
  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig, \
  QuantizationErrorMethod
  from model_compression_toolkit.core.common.quantization.quantization_params_fn_selection import \
@@ -191,8 +192,9 @@ def set_quantization_configs_to_node(node: BaseNode,
  node.sort_node_candidates(fw_info)

  for candidate_qc in node.candidates_quantization_cfg:
- candidate_qc.activation_quantization_cfg.enable_activation_quantization = \
- candidate_qc.activation_quantization_cfg.enable_activation_quantization and node.get_has_activation()
+ if candidate_qc.activation_quantization_cfg.quant_mode == ActivationQuantizationMode.QUANT and \
+ not node.get_has_activation():
+ candidate_qc.activation_quantization_cfg.quant_mode = ActivationQuantizationMode.NO_QUANT


  def create_node_activation_qc(qc: QuantizationConfig,
@@ -45,8 +45,7 @@ def get_previous_node_with_activation_quantization(linear_node: BaseNode,
  activation_quantization_config = prev_node.final_activation_quantization_cfg

  # Search for node with activation quantization
- if (activation_quantization_config.enable_activation_quantization and
- not activation_quantization_config.quantization_preserving):
+ if activation_quantization_config.enable_activation_quantization:
  return prev_node
  else:
  return get_previous_node_with_activation_quantization(prev_node, graph)
@@ -22,7 +22,8 @@ import numpy as np
  from model_compression_toolkit.core.common import Graph
  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
  from model_compression_toolkit.core import common
- from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig, \
+ ActivationQuantizationMode
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.common.graph.base_node import BaseNode
  from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
@@ -127,7 +128,7 @@ class BatchNormalizationReconstruction(common.BaseSubstitution):
  bn_node.candidates_quantization_cfg = copy.deepcopy(source_node.candidates_quantization_cfg)

  for qc in bn_node.candidates_quantization_cfg:
- qc.activation_quantization_cfg.enable_activation_quantization = False
+ qc.activation_quantization_cfg.quant_mode = ActivationQuantizationMode.NO_QUANT
  for attr in bn_node.get_node_weights_attributes():
  if qc.weights_quantization_cfg.has_attribute_config(attr):
  # we only create a BN layer to collect statistics, so we don't need to quantize anything,
@@ -17,7 +17,8 @@ import numpy as np
  from typing import List, Tuple, Any, Callable

  from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
- from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig
+ from model_compression_toolkit.core.common.quantization.node_quantization_config import WeightsAttrQuantizationConfig, \
+ ActivationQuantizationMode
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.common import FrameworkInfo, Graph, BaseNode
  from model_compression_toolkit.constants import THRESHOLD, SIGNED, SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS
@@ -363,7 +364,7 @@ def shift_negative_function(graph: Graph,
  mixed_precision_enable=core_config.is_mixed_precision_enabled)

  for candidate_qc in pad_node.candidates_quantization_cfg:
- candidate_qc.activation_quantization_cfg.enable_activation_quantization = False
+ candidate_qc.activation_quantization_cfg.quant_mode = ActivationQuantizationMode.NO_QUANT
  for attr in pad_node.get_node_weights_attributes():
  candidate_qc.weights_quantization_cfg.get_attr_config(attr).enable_weights_quantization = False

@@ -159,6 +159,19 @@ class KerasImplementation(FrameworkImplementation):
  """
  return to_tf_tensor(tensor)

+ def is_tuple_of_tensors(self, obj: Any) -> bool:
+ """
+ Check if a given object if a tuple of tensors
+ :param obj: Object to check its type
+ :return: True if obj is a tuple of tensors, False otherwise
+ """
+ if not isinstance(obj, tuple):
+ return False
+ for item in obj:
+ if not isinstance(item, tf.Tensor):
+ return False
+ return True
+
  def model_builder(self,
  graph: Graph,
  mode: ModelBuilderMode,
@@ -454,7 +467,7 @@ class KerasImplementation(FrameworkImplementation):
  return True

  return any([node.is_match_type(_type) for _type in [Conv2D, DepthwiseConv2D, Conv2DTranspose, Dense,
- Concatenate, tf.concat, Add, tf.add]])
+ Concatenate, tf.concat, Add, tf.add, tf.stack]])

  def get_mp_node_distance_fn(self, n: BaseNode,
  compute_distance_fn: Callable = None,
@@ -64,8 +64,7 @@ class ConfigurableActivationQuantizer(BaseKerasInferableQuantizer):
  verify_candidates_descending_order(self.node_q_cfg, kernel_attr)

  for qc in node_q_cfg:
- if qc.activation_quantization_cfg.enable_activation_quantization != \
- node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization:
+ if qc.activation_quantization_cfg.quant_mode != node_q_cfg[0].activation_quantization_cfg.quant_mode:
  Logger.critical("Unsupported configuration: Mixing candidates with differing activation quantization states (enabled/disabled).") # pragma: no cover

  self.activation_quantizers = init_activation_quantizers(self.node_q_cfg)
@@ -198,7 +198,6 @@ class ScaledDotProductDecomposition(BaseSubstitution):
  :param attention_node: the node to replace
  :return: A graph after the substitution
  """
- print("In scale_dot_product_attention substitution@@@@@@@@")
  input_nodes = self._get_attention_input_nodes(graph, attention_node)
  q_node, k_node, v_node = input_nodes["q"], input_nodes["k"], input_nodes["v"]
  transpose_k_node = self._get_transpose_k_node(attention_node.name, k_node)
@@ -63,8 +63,7 @@ class ConfigurableActivationQuantizer(BasePyTorchInferableQuantizer):
  verify_candidates_descending_order(self.node_q_cfg, kernel_attr)

  for qc in self.node_q_cfg:
- if qc.activation_quantization_cfg.enable_activation_quantization != \
- self.node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization:
+ if qc.activation_quantization_cfg.quant_mode != self.node_q_cfg[0].activation_quantization_cfg.quant_mode:
  Logger.critical("Unsupported configuration: Mixing candidates with differing activation quantization states (enabled/disabled).") # pragma: no cover

  # Setting layer's activation
@@ -15,12 +15,12 @@
  import operator
  from copy import deepcopy
  from functools import partial
- from typing import List, Any, Tuple, Callable, Type, Dict, Generator
+ from typing import List, Any, Tuple, Callable, Generator

  import numpy as np
  import torch
  from mct_quantizers import PytorchQuantizationWrapper, PytorchActivationQuantizationHolder
- from torch import sigmoid, softmax, add, cat, argmax, concat, concatenate
+ from torch import sigmoid, softmax, add, cat, argmax, concat, concatenate, stack
  from torch.nn import Conv2d, ConvTranspose2d, Linear
  from torch.nn import Module, Sigmoid, Softmax

@@ -144,6 +144,19 @@ class PytorchImplementation(FrameworkImplementation):
  """
  return to_torch_tensor(tensor)

+ def is_tuple_of_tensors(self, obj: Any) -> bool:
+ """
+ Check if a given object if a tuple of tensors
+ :param obj: Object to check its type
+ :return: True if obj is a tuple of tensors, False otherwise
+ """
+ if not isinstance(obj, tuple):
+ return False
+ for item in obj:
+ if not isinstance(item, torch.Tensor):
+ return False
+ return True
+
  def model_reader(self,
  module: Module,
  representative_data_gen: Callable) -> Graph:
@@ -449,7 +462,7 @@ class PytorchImplementation(FrameworkImplementation):

  return any(node.is_match_type(_type) for _type in [Conv2d, Linear, ConvTranspose2d, Sigmoid, sigmoid, Softmax,
  softmax, operator.add, add, cat, concat, concatenate,
- operator.concat])
+ operator.concat, stack])

  def get_mp_node_distance_fn(self, n: BaseNode,
  compute_distance_fn: Callable = None,
@@ -13,19 +13,40 @@
  # limitations under the License.
  # ==============================================================================

-
- import logging
- from typing import Callable, Dict
-
- import numpy as np
  import torch
- from torch.fx import symbolic_trace
+ import logging
+ from typing import Callable, Dict, Union, Any
  from torch.fx.passes.shape_prop import ShapeProp
+ from torch.fx import Tracer, GraphModule, symbolic_trace

  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.common import Graph
  from model_compression_toolkit.core.pytorch.reader.graph_builders import edges_builder, nodes_builder
  from model_compression_toolkit.core.pytorch.utils import set_model
+ from sony_custom_layers.pytorch import CustomLayer
+
+
+ def _trace_model(root: Union[torch.nn.Module, Callable[..., Any]]) -> GraphModule:
+ """
+ Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
+ constructed by recording operations seen while tracing through ``root``.
+ This function replaces torch.fx.symbolic_trace in order to handle custom layers tracing - treating them as graph
+ leafs.
+ :param root: Module or function to be traced and converted into a Graph representation.
+ :return: GraphModule: a Module created from the recorded operations from ``root``.
+ """
+
+ class MCTTracer(Tracer):
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
+ if isinstance(m, CustomLayer):
+ return True
+ return super().is_leaf_module(m, module_qualified_name)
+
+ tracer = MCTTracer()
+ graph = tracer.trace(root)
+ # handling the possibility that the model (root) might be a torch.nn.Module or a function
+ model_name = (root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__)
+ return GraphModule(tracer.root, graph, model_name)


  def generate_module_dict(model: torch.nn.Module) -> Dict:
@@ -87,7 +108,7 @@ def fx_graph_module_generation(pytorch_model: torch.nn.Module,
  set_model(pytorch_model)

  try:
- symbolic_traced = symbolic_trace(pytorch_model)
+ symbolic_traced = _trace_model(pytorch_model)
  except torch.fx.proxy.TraceError as e:
  Logger.critical(f'Error parsing model with torch.fx\n'
  f'fx error: {e}')
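A hedged sketch of what the new _trace_model helper accomplishes: a torch.fx Tracer subclass that treats custom layers as graph leaves, so fx records them as single call_module nodes instead of tracing their internals (MyCustomLayer below is a hypothetical stand-in for a sony_custom_layers CustomLayer):

    import torch
    from torch.fx import Tracer, GraphModule

    class MyCustomLayer(torch.nn.Module):  # hypothetical stand-in for CustomLayer
        def forward(self, x):
            return x + 1

    class LeafAwareTracer(Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            # Keep custom layers opaque: a single call_module node in the traced graph.
            return isinstance(m, MyCustomLayer) or super().is_leaf_module(m, module_qualified_name)

    class Model(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.custom = MyCustomLayer()

        def forward(self, x):
            return self.custom(x)

    tracer = LeafAwareTracer()
    graph = tracer.trace(Model())
    gm = GraphModule(tracer.root, graph, "Model")
    assert any(n.op == "call_module" for n in gm.graph.nodes)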
@@ -15,7 +15,7 @@
  import torch
  from torch import Tensor
  import numpy as np
- from typing import Union, Sequence, Optional, List, Tuple
+ from typing import Union, Optional, List, Tuple, Any

  from model_compression_toolkit.core.pytorch.constants import MAX_FLOAT16, MIN_FLOAT16
  from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
@@ -112,4 +112,4 @@ def clip_inf_values_float16(tensor: Tensor) -> Tensor:
  # Replace inf values with max float16 value
  tensor[inf_mask] = MAX_FLOAT16 * torch.sign(tensor[inf_mask])

- return tensor
+ return tensor
@@ -14,7 +14,7 @@
  # ==============================================================================
  import copy

- from typing import Callable, Tuple, Union
+ from typing import Callable, Tuple, Union, Optional
  from packaging import version

  from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
@@ -158,7 +158,7 @@ if FOUND_TF:
  target_resource_utilization: ResourceUtilization = None,
  core_config: CoreConfig = CoreConfig(),
  target_platform_capabilities: Union[TargetPlatformCapabilities, str]
- = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
+ = DEFAULT_KERAS_TPC) -> Tuple[Model, Optional[UserInformation]]:
  """
  Quantize a trained Keras model using post-training quantization. The model is quantized using a
  symmetric constraint quantization thresholds (power of two).
@@ -230,6 +230,10 @@ if FOUND_TF:
  >>> quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(model, repr_datagen, gptq_config, target_resource_utilization=ru, core_config=config)

  """
+
+ if core_config.debug_config.bypass:
+ return in_model, None
+
  KerasModelValidation(model=in_model,
  fw_info=DEFAULT_KERAS_INFO).validate()
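A hedged sketch (names are illustrative, not the exact MCT facade signature) of the early-return pattern introduced above, which is also why the return annotation widened to Optional[UserInformation]:

    def quantization_facade(in_model, core_config):
        # Bypass: hand the input model back untouched, with no UserInformation.
        if core_config.debug_config.bypass:
            return in_model, None
        # ... the regular MCT runner flow would continue here ...
        raise NotImplementedError  # placeholder for the rest of the facade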