mct-nightly 2.2.0.20240916.525.tar.gz → 2.2.0.20240918.448.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (564)
  1. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/PKG-INFO +1 -1
  2. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/mct_nightly.egg-info/PKG-INFO +1 -1
  3. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/mct_nightly.egg-info/SOURCES.txt +11 -1
  4. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/mct_nightly.egg-info/top_level.txt +1 -0
  5. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/__init__.py +1 -1
  6. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/base_node.py +3 -0
  7. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/functional_node.py +1 -1
  8. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +1 -1
  9. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/node_builder.py +23 -1
  10. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +5 -1
  11. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +13 -4
  12. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +12 -3
  13. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +10 -1
  14. mct-nightly-2.2.0.20240918.448/model_compression_toolkit/gptq/__init__.py +32 -0
  15. mct-nightly-2.2.0.20240918.448/model_compression_toolkit/gptq/common/gptq_config.py +135 -0
  16. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/gptq_training.py +18 -9
  17. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +49 -29
  18. mct-nightly-2.2.0.20240918.448/model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py +80 -0
  19. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +10 -10
  20. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +6 -49
  21. mct-nightly-2.2.0.20240918.448/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +39 -0
  22. mct-nightly-2.2.0.20240918.448/model_compression_toolkit/trainable_infrastructure/pytorch/util.py +29 -0
  23. mct-nightly-2.2.0.20240918.448/tests_pytest/__init__.py +14 -0
  24. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/__init__.py +14 -0
  25. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/gptq/__init__.py +14 -0
  26. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/gptq/test_annealing_cfg.py +40 -0
  27. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/gptq/test_gradual_act_quantization.py +100 -0
  28. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/trainable_infrastructure/__init__.py +14 -0
  29. mct-nightly-2.2.0.20240918.448/tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py +49 -0
  30. mct-nightly-2.2.0.20240916.525/model_compression_toolkit/gptq/__init__.py +0 -20
  31. mct-nightly-2.2.0.20240916.525/model_compression_toolkit/gptq/common/gptq_config.py +0 -122
  32. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/LICENSE.md +0 -0
  33. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/README.md +0 -0
  34. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/mct_nightly.egg-info/dependency_links.txt +0 -0
  35. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/mct_nightly.egg-info/requires.txt +0 -0
  36. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/constants.py +0 -0
  37. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/__init__.py +0 -0
  38. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/analyzer.py +0 -0
  39. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/__init__.py +0 -0
  40. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  41. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  42. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  43. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  44. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  45. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  46. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  47. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  48. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  49. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  50. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/framework_info.py +0 -0
  51. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  52. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
  53. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  54. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  55. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  56. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  57. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  58. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  59. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  60. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  61. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  62. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  63. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  64. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  65. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  66. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  67. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  68. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  69. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  70. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
  71. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +0 -0
  72. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  73. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  74. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  75. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  76. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  77. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  78. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  79. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  80. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  81. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  82. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  83. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  84. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  85. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
  86. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  87. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  88. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  89. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  90. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  91. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
  92. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
  93. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
  94. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
  95. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  96. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  97. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  98. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  99. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  100. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  101. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/model_collector.py +0 -0
  102. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/model_validation.py +0 -0
  103. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  104. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  105. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  106. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  107. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  108. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  109. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  110. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  111. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  112. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  113. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  114. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  115. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  116. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  117. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  118. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  119. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  120. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  121. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  122. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  123. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  124. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  125. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  126. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
  127. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  128. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  129. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  130. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  131. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  132. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  133. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  134. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  135. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  136. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  137. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  138. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  139. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  140. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  141. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  142. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  143. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  144. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  145. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  146. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  147. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  148. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  149. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  150. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  151. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  152. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  153. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  154. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  155. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  156. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  157. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  158. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  159. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  160. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  161. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  162. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  163. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  164. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  165. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  166. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
  167. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  168. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  169. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  170. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  171. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  172. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  173. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/user_info.py +0 -0
  174. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  175. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  176. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  177. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  178. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  179. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/__init__.py +0 -0
  180. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  181. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  182. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  183. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  184. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  185. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  186. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/constants.py +0 -0
  187. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  188. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  189. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  190. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  191. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  192. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  193. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  194. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  195. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  196. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
  197. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  198. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  199. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  200. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  201. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  202. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  203. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  204. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  205. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  206. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  207. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  208. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
  209. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  210. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  211. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  212. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  213. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
  214. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
  215. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
  216. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  217. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  218. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  219. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  220. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  221. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  222. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  223. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  224. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  225. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  226. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  227. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  228. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  229. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  230. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  231. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  232. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  233. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  234. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  235. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  236. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  237. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  238. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  239. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  240. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  241. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  242. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  243. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  244. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  245. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  246. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  247. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  248. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  249. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  250. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  251. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  252. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  253. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  254. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  255. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  256. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  257. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  258. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  259. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  260. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  261. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  262. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  263. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  264. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  265. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  266. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  267. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  268. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  269. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  270. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  271. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  272. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
  273. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  274. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  275. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  276. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -0
  277. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
  278. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
  279. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  280. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  281. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  282. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  283. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  284. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  285. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  286. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  287. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  288. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  289. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  290. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  291. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  292. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  293. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  294. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  295. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  296. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  297. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  298. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/core/runner.py +0 -0
  299. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/__init__.py +0 -0
  300. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  301. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  302. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  303. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  304. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  305. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  306. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  307. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  308. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  309. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  310. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
  311. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  312. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  313. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  314. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  315. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  316. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  317. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  318. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
  319. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  320. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  321. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  322. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  323. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  324. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
  325. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  326. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  327. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  328. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  329. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  330. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  331. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
  332. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  333. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  334. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  335. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  336. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/defaultdict.py +0 -0
  337. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/__init__.py +0 -0
  338. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  339. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  340. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  341. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  342. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  343. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  344. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  345. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  346. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  347. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  348. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  349. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  350. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  351. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  352. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  353. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  354. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  355. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  356. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  357. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  358. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  359. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  360. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  361. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  362. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  363. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  364. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  365. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  366. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  367. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  368. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  369. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  370. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  371. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  372. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  373. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  374. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  375. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  376. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  377. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
  378. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  379. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  380. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  381. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  382. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  383. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  384. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  385. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  386. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  387. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  388. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  389. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  390. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  391. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  392. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  393. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  394. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  395. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  396. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  397. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  398. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  399. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  400. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  401. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  402. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/gptq/runner.py +0 -0
  403. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/logger.py +0 -0
  404. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/metadata.py +0 -0
  405. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/pruning/__init__.py +0 -0
  406. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  407. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  408. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  409. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  410. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/__init__.py +0 -0
  411. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  412. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  413. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  414. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  415. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/ptq/runner.py +0 -0
  416. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/__init__.py +0 -0
  417. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/common/__init__.py +0 -0
  418. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  419. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  420. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  421. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  422. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  423. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  424. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  425. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  426. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  427. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  428. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  429. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  430. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  431. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  432. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  433. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  434. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
  435. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  436. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  437. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  438. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  439. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  440. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  441. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  442. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  443. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  444. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  445. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  446. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  447. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  448. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  449. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  450. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  451. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  452. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  453. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  454. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  455. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  456. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  457. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  458. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  459. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  460. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  461. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  462. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  463. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  464. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  465. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  466. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  467. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  468. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  469. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  470. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  471. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  472. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  473. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  474. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  475. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  476. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
  477. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
  478. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
  479. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
  480. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
  481. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
  482. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
  483. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
  484. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
  485. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
  486. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
  487. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
  488. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
  489. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
  490. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
  491. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
  492. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
  493. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
  494. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
  495. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
  496. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  497. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  498. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  499. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  500. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  501. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  502. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  503. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  504. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  505. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  506. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  507. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  508. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  509. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  510. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  511. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  512. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  513. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  514. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  515. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  516. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  517. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  518. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
  519. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  520. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  521. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  522. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  523. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  524. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  525. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  526. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
  527. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
  528. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
  529. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  530. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
  531. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
  532. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
  533. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
  534. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  535. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
  536. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/verify_packages.py +0 -0
  537. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/__init__.py +0 -0
  538. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/__init__.py +0 -0
  539. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/constants.py +0 -0
  540. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
  541. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
  542. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
  543. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
  544. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
  545. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
  546. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
  547. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
  548. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
  549. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
  550. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
  551. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
  552. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
  553. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
  554. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
  555. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
  556. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
  557. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
  558. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
  559. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
  560. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
  561. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
  562. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
  563. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/setup.cfg +0 -0
  564. {mct-nightly-2.2.0.20240916.525 → mct-nightly-2.2.0.20240918.448}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.2.0.20240916.525
+ Version: 2.2.0.20240918.448
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.2.0.20240916.525
+ Version: 2.2.0.20240918.448
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -383,6 +383,7 @@ model_compression_toolkit/gptq/pytorch/graph_info.py
  model_compression_toolkit/gptq/pytorch/quantization_facade.py
  model_compression_toolkit/gptq/pytorch/quantizer/__init__.py
  model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
+ model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py
  model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py
  model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py
  model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py
@@ -513,8 +514,10 @@ model_compression_toolkit/trainable_infrastructure/keras/load_model.py
  model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py
  model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py
  model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py
+ model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py
  model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
  model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py
+ model_compression_toolkit/trainable_infrastructure/pytorch/util.py
  model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py
  model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py
  model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py
@@ -548,4 +551,11 @@ model_compression_toolkit/xquant/pytorch/facade_xquant_report.py
  model_compression_toolkit/xquant/pytorch/model_analyzer.py
  model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
  model_compression_toolkit/xquant/pytorch/similarity_functions.py
- model_compression_toolkit/xquant/pytorch/tensorboard_utils.py
+ model_compression_toolkit/xquant/pytorch/tensorboard_utils.py
+ tests_pytest/__init__.py
+ tests_pytest/pytorch/__init__.py
+ tests_pytest/pytorch/gptq/__init__.py
+ tests_pytest/pytorch/gptq/test_annealing_cfg.py
+ tests_pytest/pytorch/gptq/test_gradual_act_quantization.py
+ tests_pytest/pytorch/trainable_infrastructure/__init__.py
+ tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.2.0.20240916.000525"
+ __version__ = "2.2.0.20240918.000448"
@@ -40,6 +40,7 @@ class BaseNode:
  layer_class: type,
  reuse: bool = False,
  reuse_group: str = None,
+ inputs_as_list: bool = False,
  quantization_attr: Dict[str, Any] = None,
  has_activation: bool = True,
  is_custom: bool = False
@@ -58,6 +59,7 @@ class BaseNode:
  layer_class: Class path of the layer this node represents.
  reuse: Whether this node was duplicated and represents a reused layer.
  reuse_group: Name of group of nodes from the same reused layer.
+ inputs_as_list: Whether to pass the node its input tensors as a list or not when calling the layer.
  quantization_attr: Attributes the node holds regarding how it should be quantized.
  has_activation: Whether the node has activations that we might want to quantize.
  is_custom: Whether the node is custom layer or not.
@@ -71,6 +73,7 @@ class BaseNode:
  self.layer_class = layer_class
  self.reuse = reuse
  self.reuse_group = reuse_group
+ self.inputs_as_list = inputs_as_list
  self.final_weights_quantization_cfg = None
  self.final_activation_quantization_cfg = None
  self.candidates_quantization_cfg = None
@@ -55,13 +55,13 @@ class FunctionalNode(BaseNode):
  layer_class,
  reuse,
  reuse_group,
+ inputs_as_list,
  quantization_attr,
  has_activation=has_activation)

  self.op_call_kwargs = op_call_kwargs
  self.op_call_args = list(op_call_args)
  self.functional_op = functional_op
- self.inputs_as_list = inputs_as_list
  self.tensor_input_allocs = [] if tensor_input_allocs is None else tensor_input_allocs

  @property
@@ -308,7 +308,7 @@ class KerasModelBuilder(BaseModelBuilder):
  else:
  # If operator expects a single input tensor, it cannot be a list as it should
  # have a dtype field.
- if len(input_tensors) == 1:
+ if len(input_tensors) == 1 and not n.inputs_as_list:
  input_tensors = input_tensors[0]
  out_tensors_of_n_float = op_func(input_tensors)

@@ -30,10 +30,12 @@ if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.layers.core import TFOpLambda, SlicingOpLambda
  from keras.src.engine.keras_tensor import KerasTensor
  from keras.src.engine.node import Node as KerasNode
+ from keras.src.layers.merging.base_merge import _Merge
  else:
  from keras.layers.core import TFOpLambda, SlicingOpLambda
  from keras.engine.keras_tensor import KerasTensor
  from keras.engine.node import Node as KerasNode
+ from keras.layers.merging.base_merge import _Merge

  from model_compression_toolkit.core.common.graph.base_node import BaseNode
  from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
@@ -287,6 +289,7 @@ def build_node(node: KerasNode,
  for i, arg in enumerate(op_call_args[0]):
  if is_const(arg):
  weights.update({i: to_numpy(arg, is_single_tensor=True)})
+ inputs_as_list = __is_node_inputs_a_list(op_call_args, keras_layer)

  node = BaseNode(node_name,
  layer_config,
@@ -296,6 +299,7 @@ def build_node(node: KerasNode,
  layer_class,
  is_reused,
  reuse_group,
+ inputs_as_list,
  is_custom=is_keras_custom_layer(layer_class))

  node_name_to_node[node_name] = node
@@ -316,6 +320,24 @@ def __is_functional_inputs_a_list(op_call_args: Any, keras_layer: Any) -> bool:
  """

  return (keras_layer.symbol in
- [TFOpLambda(tf.concat).symbol, TFOpLambda(tf.stack).symbol,TFOpLambda(tf.add_n).symbol] and
+ [TFOpLambda(tf.concat).symbol, TFOpLambda(tf.stack).symbol, TFOpLambda(tf.add_n).symbol] and
  len(op_call_args) > 0 and
  isinstance(op_call_args[0], list))
+
+
+ def __is_node_inputs_a_list(op_call_args: Any, keras_layer: Any) -> bool:
+ """
+ Check whether the input tensors should be passed as a list or not. This is relevant
+ only for layers that inherit from _Merge such as Concatenate and Add.
+
+ Args:
+ op_call_args: Arguments list to check.
+ keras_layer: Keras layer.
+
+ Returns:
+ Whether the input tensors should be passed as a list or not.
+ """
+
+ return (isinstance(keras_layer, _Merge) and
+ len(op_call_args) > 0 and
+ isinstance(op_call_args[0], (list, tuple)))
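
As an illustration of why this flag is needed: Keras merging layers that inherit from _Merge (e.g. Concatenate, Add) are called with a list of input tensors rather than a single tensor, which is exactly what the new inputs_as_list flag records. A minimal sketch of the two calling conventions, assuming a standard tf.keras model (this example is illustrative and not part of the diff):

import tensorflow as tf

x = tf.keras.Input(shape=(8,))
y = tf.keras.Input(shape=(8,))
merged = tf.keras.layers.Concatenate(axis=-1)([x, y])  # _Merge subclass: inputs passed as a list
out = tf.keras.layers.Dense(4)(merged)                  # regular layer: a single input tensor
model = tf.keras.Model(inputs=[x, y], outputs=out)
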
@@ -139,7 +139,11 @@ def _run_operation(n: BaseNode,
  _tensor_input_allocs = None

  if isinstance(n, FunctionalNode) and n.inputs_as_list:
- out_tensors_of_n_float = op_func(input_tensors, *op_call_args, **functional_kwargs)
+ if isinstance(op_func, PytorchQuantizationWrapper):
+ # in wrapped nodes, the op args & kwargs are already in the PytorchQuantizationWrapper.
+ out_tensors_of_n_float = op_func(*input_tensors)
+ else:
+ out_tensors_of_n_float = op_func(input_tensors, *op_call_args, **functional_kwargs)
  else:
  merged_inputs, functional_kwargs = _merge_inputs(n, input_tensors, op_call_args, functional_kwargs.copy(),
  tensor_input_allocs=_tensor_input_allocs)
@@ -232,10 +232,19 @@ def nodes_builder(model: GraphModule,

  # Add constants to weights dictionary.
  if node.op != PLACEHOLDER:
- for i, input_node in enumerate(node.all_input_nodes):
- if input_node in consts_dict:
- used_consts.add(input_node)
- weights.update({i: consts_dict[input_node]})
+ if len(node.args) and isinstance(node.args[0], (list, tuple)):
+ # handle weights in nodes with list input. Especially when there's a duplicate of a tensor
+ # in the input list (e.g. torch.concat([const1, x, const2, x, const3], 1)).
+ for input_node in node.all_input_nodes:
+ for i, input_arg in enumerate(node.args[0]):
+ if input_node is input_arg and input_node in consts_dict:
+ used_consts.add(input_node)
+ weights.update({i: consts_dict[input_node]})
+ else:
+ for i, input_node in enumerate(node.all_input_nodes):
+ if input_node in consts_dict:
+ used_consts.add(input_node)
+ weights.update({i: consts_dict[input_node]})

  # Extract input and output shapes of the node.
  input_shape, output_shape = _extract_input_and_output_shapes(node)
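
The new list-input branch above handles fx nodes whose first argument is itself a list that can repeat the same tensor alongside constants, as in the torch.concat case mentioned in the comment. A minimal sketch of such a module, assuming standard torch.fx tracing (module and attribute names here are illustrative, not part of the package):

import torch
from torch import fx

class ConcatWithConsts(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.const1 = torch.randn(1, 3)
        self.const2 = torch.randn(1, 3)

    def forward(self, x):
        # x appears twice in the concat input list, interleaved with constant tensors.
        return torch.concat([self.const1, x, self.const2, x], dim=1)

traced = fx.symbolic_trace(ConcatWithConsts())
print(traced.graph)  # the concat node's args[0] is a list of fx nodes, with x repeated
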
@@ -13,7 +13,7 @@
  # limitations under the License.
  # ==============================================================================

- from typing import Tuple, Callable
+ from typing import Tuple, Callable, Union
  from model_compression_toolkit.core import common
  from model_compression_toolkit.core.common import Graph
  from model_compression_toolkit.verify_packages import FOUND_TF
@@ -25,10 +25,12 @@ if FOUND_TF:
  import tensorflow as tf
  from tensorflow.keras.layers import Layer
  from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
+ from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
  from mct_quantizers import KerasQuantizationWrapper
  from mct_quantizers import KerasActivationQuantizationHolder
+ from mct_quantizers.common.constants import OP_CALL_ARGS, OP_CALL_KWARGS

- def _get_wrapper(node: common.BaseNode,
+ def _get_wrapper(node: Union[common.BaseNode, FunctionalNode],
  layer: Layer,
  fw_impl=None) -> Layer:
  """
@@ -45,9 +47,16 @@ if FOUND_TF:
  # for positional weights we need to extract the weight's value.
  weights_values = {attr: node.get_weights_by_keys(attr)
  for attr in weights_quantizers if isinstance(attr, int)}
+ # When wrapping functional nodes, need to set call args\kwargs in wrapper, because they
+ # are used during wrapper call method.
+ func_node_kwargs = {OP_CALL_ARGS: node.op_call_args,
+ OP_CALL_KWARGS: node.op_call_kwargs
+ } if isinstance(node, FunctionalNode) else {}
  return KerasQuantizationWrapper(layer,
  weights_quantizers,
- weights_values)
+ weights_values,
+ is_inputs_as_list=node.inputs_as_list,
+ **func_node_kwargs)
  return layer

@@ -24,7 +24,9 @@ import model_compression_toolkit.core as C
  if FOUND_TORCH:
  import torch
  from mct_quantizers import PytorchQuantizationWrapper, PytorchActivationQuantizationHolder
+ from mct_quantizers.common.constants import OP_CALL_ARGS, OP_CALL_KWARGS
  from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
+ from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode


  def fully_quantized_wrapper(node: common.BaseNode,
@@ -46,7 +48,14 @@ if FOUND_TORCH:
  # for positional weights we need to extract the weight's value.
  weights_values = {attr: fw_impl.to_tensor(node.get_weights_by_keys(attr))
  for attr in weight_quantizers if isinstance(attr, int)}
- return PytorchQuantizationWrapper(module, weight_quantizers, weights_values)
+ # When wrapping functional nodes, need to set call args\kwargs in wrapper, because they
+ # are used during wrapper call method.
+ func_node_kwargs = {OP_CALL_ARGS: node.op_call_args,
+ OP_CALL_KWARGS: node.op_call_kwargs
+ } if isinstance(node, FunctionalNode) else {}
+ return PytorchQuantizationWrapper(module, weight_quantizers, weights_values,
+ is_inputs_as_list=node.inputs_as_list,
+ **func_node_kwargs)
  return module

@@ -0,0 +1,32 @@
+ # Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ from model_compression_toolkit.gptq.common.gptq_config import (
+ GradientPTQConfig,
+ RoundingType,
+ GPTQHessianScoresConfig,
+ GradualActivationQuantizationConfig,
+ QFractionLinearAnnealingConfig
+ )
+
+ from model_compression_toolkit.verify_packages import FOUND_TF, FOUND_TORCH
+
+ if FOUND_TF:
+ from model_compression_toolkit.gptq.keras.quantization_facade import keras_gradient_post_training_quantization
+ from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
+
+ if FOUND_TORCH:
+ from model_compression_toolkit.gptq.pytorch.quantization_facade import pytorch_gradient_post_training_quantization
+ from model_compression_toolkit.gptq.pytorch.quantization_facade import get_pytorch_gptq_config
@@ -0,0 +1,135 @@
+ # Copyright 2021 Sony Semiconductor Israel, Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import Callable, Any, Dict, Optional
+
+ from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES, ACT_HESSIAN_DEFAULT_BATCH_SIZE
+ from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
+
+
+ class RoundingType(Enum):
+ """
+ An enum for choosing the GPTQ rounding methods:
+
+ STE - STRAIGHT-THROUGH ESTIMATOR
+
+ SoftQuantizer - SoftQuantizer
+
+ """
+ STE = 0
+ SoftQuantizer = 1
+
+
+ @dataclass
+ class GPTQHessianScoresConfig:
+ """
+ Configuration to use for computing the Hessian-based scores for GPTQ loss metric.
+
+ Args:
+ hessians_num_samples (int): Number of samples to use for computing the Hessian-based scores.
+ norm_scores (bool): Whether to normalize the returned scores of the weighted loss function (to get values between 0 and 1).
+ log_norm (bool): Whether to use log normalization for the GPTQ Hessian-based scores.
+ scale_log_norm (bool): Whether to scale the final vector of the Hessian-based scores.
+ hessian_batch_size (int): The Hessian computation batch size. used only if using GPTQ with Hessian-based objective.
+ """
+ hessians_num_samples: int = GPTQ_HESSIAN_NUM_SAMPLES
+ norm_scores: bool = True
+ log_norm: bool = True
+ scale_log_norm: bool = False
+ hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE
+
+
+ @dataclass
+ class QFractionLinearAnnealingConfig:
+ """
+ Config for the quantized fraction linear scheduler of Gradual Activation Quantization.
+
+ Args:
+ initial_q_fraction: initial quantized fraction
+ target_q_fraction: target quantized fraction
+ start_step: gradient step to begin annealing
+ end_step: gradient step to complete annealing. None means last step.
+ """
+ initial_q_fraction: float
+ target_q_fraction: float
+ start_step: int
+ end_step: Optional[int]
+
+ def __post_init__(self):
+ if not (0 <= self.initial_q_fraction < self.target_q_fraction <= 1):
+ raise ValueError(f'Expected 0 <= initial_q_fraction < target_q_fraction <= 1, received initial_q_fraction '
+ f'{self.initial_q_fraction} and target_q_fraction {self.target_q_fraction}.')
+ if self.start_step < 0:
+ raise ValueError(f'Expected start_step >= 0. received {self.start_step}.')
78
+ raise ValueError('Expected start_step < end_step, '
79
+ 'received end_step {self.end_step} and start_step {self.start_stap}.')
80
+
81
+
82
+ @dataclass
83
+ class GradualActivationQuantizationConfig:
84
+ """ Configuration for Gradual Activation Quantization.
85
+
86
+ By default, the quantized fraction increases linearly from 0 to 1 throughout the training.
87
+
88
+ Args:
89
+ q_fraction_scheduler_policy: config for the scheduling of the quantized fraction.
90
+ Only linear annealing is currently supported.
91
+ """
92
+ q_fraction_scheduler_policy: QFractionLinearAnnealingConfig = field(
93
+ default_factory=lambda: QFractionLinearAnnealingConfig(initial_q_fraction=0,
94
+ target_q_fraction=1,
95
+ start_step=0,
96
+ end_step=None)
97
+ )
98
+
99
+
100
+ @dataclass
101
+ class GradientPTQConfig:
102
+ """
103
+ Configuration to use for quantization with GradientPTQ.
104
+
105
+ Args:
106
+ n_epochs: Number of representative dataset epochs to train.
107
+ optimizer: Optimizer to use.
108
+ optimizer_rest: Optimizer to use for bias and quantizer parameters.
109
+ loss: The loss to use. See 'multiple_tensors_mse_loss' for the expected interface.
110
+ log_function: Function to log information about the GPTQ process.
111
+ train_bias: Whether to update the bias during the training or not.
112
+ rounding_type: An enum that defines the rounding type.
113
+ use_hessian_based_weights: Whether to use Hessian-based weights for weighted average loss.
114
+ optimizer_quantization_parameter: Optimizer to override the rest optimizer for quantizer parameters.
115
+ optimizer_bias: Optimizer to override the rest optimizer for bias.
116
+ regularization_factor: A floating point number that defines the regularization factor.
117
+ hessian_weights_config: A configuration that include all necessary arguments to run a computation of
118
+ Hessian scores for the GPTQ loss.
119
+ gradual_activation_quantization_config: A configuration for Gradual Activation Quantization.
120
+ gptq_quantizer_params_override: A dictionary of parameters to override in GPTQ quantizer instantiation.
121
+ """
122
+ n_epochs: int
123
+ optimizer: Any
124
+ optimizer_rest: Any = None
125
+ loss: Callable = None
126
+ log_function: Callable = None
127
+ train_bias: bool = True
128
+ rounding_type: RoundingType = RoundingType.SoftQuantizer
129
+ use_hessian_based_weights: bool = True
130
+ optimizer_quantization_parameter: Any = None
131
+ optimizer_bias: Any = None
132
+ regularization_factor: float = REG_DEFAULT
133
+ hessian_weights_config: GPTQHessianScoresConfig = field(default_factory=GPTQHessianScoresConfig)
134
+ gradual_activation_quantization_config: Optional[GradualActivationQuantizationConfig] = None
135
+ gptq_quantizer_params_override: Dict[str, Any] = field(default_factory=dict)
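
Taken together, the dataclasses above define the new GPTQ configuration surface exported from model_compression_toolkit.gptq. A minimal construction sketch, assuming PyTorch and the linear annealing policy; the optimizer instance and its dummy parameter list are illustrative choices, not mandated by the diff:

import torch
from model_compression_toolkit.gptq import (GradientPTQConfig,
                                             GradualActivationQuantizationConfig,
                                             QFractionLinearAnnealingConfig)

annealing_cfg = QFractionLinearAnnealingConfig(initial_q_fraction=0.25,
                                               target_q_fraction=1.0,
                                               start_step=0,
                                               end_step=None)  # None: anneal until the last gradient step
gradual_cfg = GradualActivationQuantizationConfig(q_fraction_scheduler_policy=annealing_cfg)
gptq_cfg = GradientPTQConfig(n_epochs=5,
                             optimizer=torch.optim.Adam([torch.zeros(1, requires_grad=True)], lr=3e-2),
                             gradual_activation_quantization_config=gradual_cfg)
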
@@ -21,6 +21,8 @@ import copy
  import torch

  from model_compression_toolkit.core.common.hessian import HessianInfoService
+ from model_compression_toolkit.gptq.pytorch.quantizer.gradual_activation_quantization import \
+ get_gradual_activation_quantizer_wrapper_factory
  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
  from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
@@ -36,6 +38,7 @@ from model_compression_toolkit.gptq.pytorch.graph_info import get_gptq_trainable
  from model_compression_toolkit.gptq.pytorch.quantizer.quantization_builder import quantization_builder
  from model_compression_toolkit.gptq.pytorch.quantizer.regularization_factory import get_regularization
  from mct_quantizers import PytorchQuantizationWrapper, PytorchActivationQuantizationHolder
+ from model_compression_toolkit.trainable_infrastructure.pytorch.util import get_total_grad_steps


  class PytorchGPTQTrainer(GPTQTrainer):
@@ -66,6 +69,13 @@ class PytorchGPTQTrainer(GPTQTrainer):
  representative_data_gen: Dataset to use for inputs of the models.
  hessian_info_service: HessianInfoService to fetch info based on the hessian approximation of the float model.
  """
+ def _get_total_grad_steps():
+ return get_total_grad_steps(representative_data_gen) * gptq_config.n_epochs
+
+ # must be set prior to model building in the base class constructor
+ self.gradual_act_quantizer_wrapper_factory = get_gradual_activation_quantizer_wrapper_factory(
+ gptq_config, _get_total_grad_steps)
+
  super().__init__(graph_float,
  graph_quant,
  gptq_config,
@@ -98,7 +108,7 @@ class PytorchGPTQTrainer(GPTQTrainer):

  self.weights_for_average_loss = to_torch_tensor(self.compute_hessian_based_weights())

- self.reg_func = get_regularization(self.gptq_config, representative_data_gen)
+ self.reg_func = get_regularization(self.gptq_config, _get_total_grad_steps)

  def _is_gptq_weights_trainable(self,
  node: BaseNode) -> bool:
@@ -145,7 +155,6 @@ class PytorchGPTQTrainer(GPTQTrainer):
  def get_activation_quantizer_holder(self, n: BaseNode) -> Callable:
  """
  Retrieve a PytorchActivationQuantizationHolder layer to use for activation quantization of a node.
- If the layer is not supposed to be wrapped with an activation quantizer - return None.
  Args:
  n: Node to attach a PytorchActivationQuantizationHolder to its output.
  Returns:
@@ -153,13 +162,13 @@ class PytorchGPTQTrainer(GPTQTrainer):
  """
  _, activation_quantizers = quantization_builder(n, self.gptq_config)
  # Holder by definition uses a single quantizer for the activation quantization
- # thus we make sure this is the only possible case (unless it's a node we no activation
- # quantization, which in this case has an empty list).
- if len(activation_quantizers) == 1:
- return PytorchActivationQuantizationHolder(activation_quantizers[0])
- Logger.critical(f"'PytorchActivationQuantizationHolder' requires exactly one quantizer, "
- f"but {len(activation_quantizers)} were found for node {n.name}. "
- f"Ensure the node is configured with a single activation quantizer.")
+ # thus we make sure this is the only possible case
+ if len(activation_quantizers) != 1:
+ Logger.critical(f"'PytorchActivationQuantizationHolder' requires exactly one quantizer, "
+ f"but {len(activation_quantizers)} were found for node {n.name}. "
+ f"Ensure the node is configured with a single activation quantizer.")
+ quantizer = self.gradual_act_quantizer_wrapper_factory(activation_quantizers[0])
+ return PytorchActivationQuantizationHolder(quantizer)

  def build_gptq_model(self):
  """