mct-nightly 2.2.0.20241006.532__tar.gz → 2.2.0.20241008.450__tar.gz

This diff shows the changes between publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (564)
  1. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/PKG-INFO +1 -1
  2. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/PKG-INFO +1 -1
  3. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/__init__.py +1 -1
  4. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/__init__.py +3 -1
  5. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +60 -2
  6. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_scores_request.py +14 -3
  7. mct-nightly-2.2.0.20241008.450/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +201 -0
  8. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_config.py +7 -2
  9. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_training.py +43 -12
  10. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +36 -1
  11. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_training.py +58 -8
  12. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +28 -7
  13. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +1 -1
  14. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +14 -12
  15. mct-nightly-2.2.0.20241006.532/model_compression_toolkit/core/pytorch/hessian/activation_hessian_scores_calculator_pytorch.py +0 -152
  16. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/LICENSE.md +0 -0
  17. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/README.md +0 -0
  18. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/SOURCES.txt +0 -0
  19. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/dependency_links.txt +0 -0
  20. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/requires.txt +0 -0
  21. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/mct_nightly.egg-info/top_level.txt +0 -0
  22. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/constants.py +0 -0
  23. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/__init__.py +0 -0
  24. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/analyzer.py +0 -0
  25. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/__init__.py +0 -0
  26. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  27. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  28. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  29. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  30. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  31. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  32. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  33. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  34. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  35. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  36. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/framework_info.py +0 -0
  37. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  38. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/graph_fuser.py +0 -0
  39. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  40. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  41. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  42. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  43. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  44. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  45. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  46. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  47. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  48. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  49. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  50. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  51. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  52. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  53. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  54. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  55. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  56. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/hessian/hessian_scores_calculator.py +0 -0
  57. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  58. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  59. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  60. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  61. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  62. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  63. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  64. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  65. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  66. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  67. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  68. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  69. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  70. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +0 -0
  71. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  72. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  73. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  74. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  75. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  76. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
  77. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
  78. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
  79. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
  80. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  81. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  82. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  83. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  84. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  85. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  86. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_collector.py +0 -0
  87. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/model_validation.py +0 -0
  88. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  89. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  90. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  91. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  92. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  93. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  94. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  95. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  96. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  97. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  98. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  99. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  100. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  101. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  102. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  103. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  104. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  105. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  106. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  107. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  108. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  109. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  110. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  111. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/bit_width_config.py +0 -0
  112. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  113. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  114. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  115. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  116. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  117. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  118. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  119. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  120. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  121. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  122. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  123. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  124. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  125. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  126. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  127. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  128. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  129. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  130. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  131. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  132. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  133. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  134. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  135. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  136. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  137. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  138. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  139. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  140. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  141. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  142. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  143. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  144. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  145. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  146. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  147. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  148. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  149. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  150. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  151. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
  152. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  153. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  154. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  155. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  156. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  157. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  158. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/user_info.py +0 -0
  159. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  160. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  161. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  162. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  163. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  164. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/__init__.py +0 -0
  165. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  166. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  167. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  168. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  169. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  170. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  171. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  172. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/constants.py +0 -0
  173. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  174. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  175. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  176. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  177. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  178. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  179. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  180. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  181. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  182. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +0 -0
  183. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  184. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  185. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  186. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  187. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  188. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  189. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  190. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  191. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  192. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  193. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  194. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/sigmoid_mul_to_swish.py +0 -0
  195. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  196. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  197. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  198. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  199. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/activation_hessian_scores_calculator_keras.py +0 -0
  200. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/hessian_scores_calculator_keras.py +0 -0
  201. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/hessian/weights_hessian_scores_calculator_keras.py +0 -0
  202. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  203. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  204. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  205. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  206. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  207. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  208. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  209. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  210. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  211. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  212. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  213. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  214. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  215. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  216. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  217. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  218. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  219. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  220. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  221. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  222. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  223. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  224. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  225. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  226. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  227. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  228. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  229. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  230. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  231. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  232. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  233. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  234. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  235. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  236. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  237. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  238. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  239. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  240. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  241. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  242. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  243. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  244. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  245. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  246. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  247. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  248. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  249. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  250. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  251. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  252. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  253. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  254. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  255. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  256. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  257. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  258. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scaled_dot_product_attention.py +0 -0
  259. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  260. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  261. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/transform_function_call_method.py +0 -0
  262. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  263. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  264. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  265. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/hessian_scores_calculator_pytorch.py +0 -0
  266. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/hessian/weights_hessian_scores_calculator_pytorch.py +0 -0
  267. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  268. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  269. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  270. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  271. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  272. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  273. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  274. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  275. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  276. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  277. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  278. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  279. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  280. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  281. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  282. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  283. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  284. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  285. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  286. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  287. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/core/runner.py +0 -0
  288. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/__init__.py +0 -0
  289. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  290. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  291. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  292. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  293. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  294. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  295. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  296. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  297. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  298. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  299. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/image_operations.py +0 -0
  300. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  301. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  302. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  303. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  304. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  305. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  306. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  307. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/lr_scheduler.py +0 -0
  308. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  309. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  310. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  311. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  312. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  313. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/image_operations.py +0 -0
  314. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  315. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  316. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  317. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  318. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  319. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  320. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/lr_scheduler.py +0 -0
  321. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  322. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  323. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  324. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  325. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/defaultdict.py +0 -0
  326. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/__init__.py +0 -0
  327. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  328. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  329. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  330. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  331. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  332. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  333. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  334. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  335. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  336. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  337. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  338. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  339. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  340. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  341. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  342. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  343. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  344. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  345. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  346. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  347. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  348. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  349. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  350. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  351. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  352. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  353. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  354. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  355. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  356. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  357. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  358. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/__init__.py +0 -0
  359. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  360. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  361. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  362. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  363. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  364. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  365. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  366. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  367. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  368. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
  369. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  370. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  371. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  372. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  373. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  374. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  375. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  376. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  377. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  378. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  379. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  380. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  381. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  382. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  383. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  384. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  385. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/gradual_activation_quantization.py +0 -0
  386. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  387. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  388. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  389. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  390. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  391. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  392. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  393. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/gptq/runner.py +0 -0
  394. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/logger.py +0 -0
  395. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/metadata.py +0 -0
  396. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/__init__.py +0 -0
  397. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  398. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  399. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  400. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  401. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/__init__.py +0 -0
  402. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  403. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  404. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  405. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  406. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/ptq/runner.py +0 -0
  407. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/__init__.py +0 -0
  408. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/common/__init__.py +0 -0
  409. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  410. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  411. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  412. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  413. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  414. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  415. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  416. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  417. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  418. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  419. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  420. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  421. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  422. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  423. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  424. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  425. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_weight_quantizer.py +0 -0
  426. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  427. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  428. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  429. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  430. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  431. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  432. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  433. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  434. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  435. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  436. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  437. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  438. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  439. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  440. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  441. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  442. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  443. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  444. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  445. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  446. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  447. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  448. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  449. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  450. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  451. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  452. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  453. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  454. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  455. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  456. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  457. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  458. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  459. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  460. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  461. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  462. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  463. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  464. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  465. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  466. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  467. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
  468. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
  469. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
  470. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
  471. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
  472. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
  473. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
  474. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
  475. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/__init__.py +0 -0
  476. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tp_model.py +0 -0
  477. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_keras.py +0 -0
  478. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3/tpc_pytorch.py +0 -0
  479. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/__init__.py +0 -0
  480. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tp_model.py +0 -0
  481. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_keras.py +0 -0
  482. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v3_lut/tpc_pytorch.py +0 -0
  483. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/__init__.py +0 -0
  484. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tp_model.py +0 -0
  485. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_keras.py +0 -0
  486. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v4/tpc_pytorch.py +0 -0
  487. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  488. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  489. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  490. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  491. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  492. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  493. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  494. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  495. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  496. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  497. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  498. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  499. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  500. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  501. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  502. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  503. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  504. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  505. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  506. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  507. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  508. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  509. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/common/training_method.py +0 -0
  510. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  511. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  512. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  513. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  514. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  515. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  516. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  517. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/__init__.py +0 -0
  518. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/base_activation_quantizer.py +0 -0
  519. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/__init__.py +0 -0
  520. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/symmetric_lsq.py +0 -0
  521. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/lsq/uniform_lsq.py +0 -0
  522. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/__init__.py +0 -0
  523. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/symmetric_ste.py +0 -0
  524. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/activation_quantizers/ste/uniform_ste.py +0 -0
  525. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/annealing_schedulers.py +0 -0
  526. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  527. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/quantizer_utils.py +0 -0
  528. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/trainable_infrastructure/pytorch/util.py +0 -0
  529. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/verify_packages.py +0 -0
  530. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/__init__.py +0 -0
  531. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/__init__.py +0 -0
  532. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/constants.py +0 -0
  533. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/core_report_generator.py +0 -0
  534. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/dataset_utils.py +0 -0
  535. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/framework_report_utils.py +0 -0
  536. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/model_analyzer.py +0 -0
  537. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/model_folding_utils.py +0 -0
  538. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/similarity_calculator.py +0 -0
  539. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/similarity_functions.py +0 -0
  540. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/tensorboard_utils.py +0 -0
  541. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/common/xquant_config.py +0 -0
  542. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/__init__.py +0 -0
  543. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/dataset_utils.py +0 -0
  544. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/facade_xquant_report.py +0 -0
  545. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/keras_report_utils.py +0 -0
  546. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/model_analyzer.py +0 -0
  547. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/similarity_functions.py +0 -0
  548. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/keras/tensorboard_utils.py +0 -0
  549. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/__init__.py +0 -0
  550. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/dataset_utils.py +0 -0
  551. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/facade_xquant_report.py +0 -0
  552. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/model_analyzer.py +0 -0
  553. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py +0 -0
  554. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/similarity_functions.py +0 -0
  555. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/model_compression_toolkit/xquant/pytorch/tensorboard_utils.py +0 -0
  556. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/setup.cfg +0 -0
  557. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/setup.py +0 -0
  558. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/__init__.py +0 -0
  559. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/__init__.py +0 -0
  560. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/__init__.py +0 -0
  561. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/test_annealing_cfg.py +0 -0
  562. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/gptq/test_gradual_act_quantization.py +0 -0
  563. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/trainable_infrastructure/__init__.py +0 -0
  564. {mct-nightly-2.2.0.20241006.532 → mct-nightly-2.2.0.20241008.450}/tests_pytest/pytorch/trainable_infrastructure/test_linear_annealing.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.2.0.20241006.532
+ Version: 2.2.0.20241008.450
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.2.0.20241006.532
+ Version: 2.2.0.20241008.450
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.2.0.20241006.000532"
+ __version__ = "2.2.0.20241008.000450"
@@ -12,6 +12,8 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
- from model_compression_toolkit.core.common.hessian.hessian_scores_request import HessianScoresRequest, HessianMode, HessianScoresGranularity
+ from model_compression_toolkit.core.common.hessian.hessian_scores_request import (
+     HessianScoresRequest, HessianMode, HessianScoresGranularity, HessianEstimationDistribution
+ )
  from model_compression_toolkit.core.common.hessian.hessian_info_service import HessianInfoService
  import model_compression_toolkit.core.common.hessian.hessian_info_utils as hessian_utils
@@ -12,16 +12,19 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
+ import hashlib

  import numpy as np
  from functools import partial
  from tqdm import tqdm
- from typing import Callable, List, Dict, Any, Tuple
+ from typing import Callable, List, Dict, Any, Tuple, TYPE_CHECKING

  from model_compression_toolkit.constants import HESSIAN_NUM_ITERATIONS
  from model_compression_toolkit.core.common.hessian.hessian_scores_request import HessianScoresRequest, \
      HessianScoresGranularity, HessianMode
  from model_compression_toolkit.logger import Logger
+ if TYPE_CHECKING:  # pragma: no cover
+     from model_compression_toolkit.core.common import BaseNode


  class HessianInfoService:
@@ -228,6 +231,61 @@ class HessianInfoService:
          return next_iter_remain_samples if next_iter_remain_samples is not None and len(next_iter_remain_samples) > 0 \
              and len(next_iter_remain_samples[0]) > 0 else None

+     def compute_trackable_per_sample_hessian(self,
+                                              hessian_scores_request: HessianScoresRequest,
+                                              inputs_batch: List[np.ndarray]) -> Dict[str, Dict['BaseNode', np.ndarray]]:
+         """
+         Compute hessian score per image hash. We compute the score directly for images rather than via data generator,
+         as data generator might yield different images each time, depending on how it was defined,
+
+         Args:
+             hessian_scores_request: hessian scores request
+             inputs_batch: a list containing a batch of inputs.
+
+         Returns:
+             A dict of Hessian scores per image hash per layer {image hash: {layer: score}}
+         """
+         topo_sorted_nodes_names = [x.name for x in self.graph.get_topo_sorted_nodes()]
+         hessian_scores_request.target_nodes.sort(key=lambda x: topo_sorted_nodes_names.index(x.name))
+
+         hessian_score_by_image_hash = {}
+
+         if not inputs_batch or not isinstance(inputs_batch, list):
+             raise TypeError('Expected a non-empty list of inputs')  # pragma: no cover
+         if len(inputs_batch) > 1:
+             raise NotImplementedError('Per-sample hessian computation is not supported for networks with multiple inputs')  # pragma: no cover
+
+         # Get the framework-specific calculator Hessian-approximation scores
+         fw_hessian_calculator = self.fw_impl.get_hessian_scores_calculator(graph=self.graph,
+                                                                            input_images=inputs_batch,
+                                                                            hessian_scores_request=hessian_scores_request,
+                                                                            num_iterations_for_approximation=self.num_iterations_for_approximation)
+         hessian_scores = fw_hessian_calculator.compute()
+         for i in range(inputs_batch[0].shape[0]):
+             img_hash = self.calc_image_hash(inputs_batch[0][i])
+             hessian_score_by_image_hash[img_hash] = {
+                 node: score[i] for node, score in zip(hessian_scores_request.target_nodes, hessian_scores)
+             }
+
+         return hessian_score_by_image_hash
+
+     @staticmethod
+     def calc_image_hash(image):
+         """
+         Calculates hash for an input image.
+
+         Args:
+             image: input 3d image (without batch).
+
+         Returns:
+             Image hash.
+
+         """
+         if not len(image.shape) == 3:  # pragma: no cover
+             raise ValueError(f'Expected 3d image (without batch) for image hash calculation, got {len(image.shape)}')
+         image_bytes = image.astype(np.float32).tobytes()
+         return hashlib.md5(image_bytes).hexdigest()
+
      def fetch_hessian(self,
                        hessian_scores_request: HessianScoresRequest,
                        required_size: int,
@@ -248,7 +306,7 @@ class HessianInfoService:
              OC for per-output-channel when the requested node has OC output-channels, etc.)
          """

-         if len(hessian_scores_request.target_nodes) == 0:
+         if len(hessian_scores_request.target_nodes) == 0:  # pragma: no cover
              return []

          if required_size == 0:
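
The new compute_trackable_per_sample_hessian/calc_image_hash pair keys per-sample scores by an MD5 of the raw image bytes, so the same image always maps to the same entry no matter how the data generator shuffles its batches. For illustration only (not part of this diff), a standalone NumPy sketch of that bookkeeping; the layer name and image sizes are invented:

    import hashlib
    import numpy as np

    def image_hash(image: np.ndarray) -> str:
        # Hash the raw float32 bytes of one (C, H, W) image, mirroring calc_image_hash above.
        return hashlib.md5(image.astype(np.float32).tobytes()).hexdigest()

    # Toy batch of two 3x8x8 images; the scores are placeholders for per-layer Hessian approximations.
    batch = np.random.rand(2, 3, 8, 8).astype(np.float32)
    scores_per_image = {image_hash(img): {'conv1': float(np.random.rand())} for img in batch}
    print(list(scores_per_image.keys()))  # two stable hash keys, one per image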
@@ -40,6 +40,14 @@ class HessianScoresGranularity(Enum):
      PER_TENSOR = 2


+ class HessianEstimationDistribution(str, Enum):
+     """
+     Distribution for Hutchinson estimator random vector
+     """
+     GAUSSIAN = 'gaussian'
+     RADEMACHER = 'rademacher'
+
+
  class HessianScoresRequest:
      """
      Request configuration for the Hessian-approximation scores.
@@ -53,7 +61,8 @@ class HessianScoresRequest:
      def __init__(self,
                   mode: HessianMode,
                   granularity: HessianScoresGranularity,
-                  target_nodes: List):
+                  target_nodes: List,
+                  distribution: HessianEstimationDistribution = HessianEstimationDistribution.GAUSSIAN):
          """
          Attributes:
              mode (HessianMode): Mode of Hessian-approximation score (w.r.t weights or activations).
@@ -64,6 +73,7 @@ class HessianScoresRequest:
          self.mode = mode  # w.r.t activations or weights
          self.granularity = granularity  # per element, per layer, per channel
          self.target_nodes = target_nodes
+         self.distribution = distribution

      def __eq__(self, other):
          # Checks if the other object is an instance of HessianScoresRequest
@@ -71,9 +81,10 @@ class HessianScoresRequest:
          return isinstance(other, HessianScoresRequest) and \
                 self.mode == other.mode and \
                 self.granularity == other.granularity and \
-                self.target_nodes == other.target_nodes
+                self.target_nodes == other.target_nodes and \
+                self.distribution == other.distribution

      def __hash__(self):
          # Computes the hash based on the attributes.
          # The use of a tuple here ensures that the hash is influenced by all the attributes.
-         return hash((self.mode, self.granularity, tuple(self.target_nodes)))
+         return hash((self.mode, self.granularity, tuple(self.target_nodes), self.distribution))
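
With the enum above exported from the hessian package (see the __init__.py hunk earlier), a request can pin the estimator distribution explicitly. A hedged usage sketch, assuming this nightly build is installed; the empty target_nodes list is a placeholder where real graph nodes would go:

    from model_compression_toolkit.core.common.hessian import (HessianScoresRequest, HessianMode,
                                                                HessianScoresGranularity,
                                                                HessianEstimationDistribution)

    # target_nodes would normally hold BaseNode objects from the compression graph.
    request = HessianScoresRequest(mode=HessianMode.ACTIVATION,
                                   granularity=HessianScoresGranularity.PER_OUTPUT_CHANNEL,
                                   target_nodes=[],
                                   distribution=HessianEstimationDistribution.RADEMACHER)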
@@ -0,0 +1,201 @@
+ # Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ from typing import List
+
+ from torch import autograd
+ from tqdm import tqdm
+ import numpy as np
+
+ from model_compression_toolkit.constants import MIN_HESSIAN_ITER, HESSIAN_COMP_TOLERANCE, HESSIAN_NUM_ITERATIONS
+ from model_compression_toolkit.core.common import Graph
+ from model_compression_toolkit.core.common.hessian import (HessianScoresRequest, HessianScoresGranularity,
+                                                             HessianEstimationDistribution)
+ from model_compression_toolkit.core.pytorch.back2framework.float_model_builder import FloatPyTorchModelBuilder
+ from model_compression_toolkit.core.pytorch.hessian.hessian_scores_calculator_pytorch import \
+     HessianScoresCalculatorPytorch
+ from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
+ from model_compression_toolkit.logger import Logger
+ import torch
+
+
+ class ActivationHessianScoresCalculatorPytorch(HessianScoresCalculatorPytorch):
+     """
+     Pytorch implementation of the Hessian approximation scores Calculator for activations.
+     """
+     def __init__(self,
+                  graph: Graph,
+                  input_images: List[torch.Tensor],
+                  fw_impl,
+                  hessian_scores_request: HessianScoresRequest,
+                  num_iterations_for_approximation: int = HESSIAN_NUM_ITERATIONS):
+         """
+         Args:
+             graph: Computational graph for the float model.
+             input_images: List of input images for the computation.
+             fw_impl: Framework-specific implementation for Hessian approximation scores computation.
+             hessian_scores_request: Configuration request for which to compute the Hessian approximation scores.
+             num_iterations_for_approximation: Number of iterations to use when approximating the Hessian scores.
+
+         """
+         super(ActivationHessianScoresCalculatorPytorch, self).__init__(graph=graph,
+                                                                        input_images=input_images,
+                                                                        fw_impl=fw_impl,
+                                                                        hessian_scores_request=hessian_scores_request,
+                                                                        num_iterations_for_approximation=num_iterations_for_approximation)
+
+     def forward_pass(self):
+         model_output_nodes = [ot.node for ot in self.graph.get_outputs()]
+
+         if len([n for n in self.hessian_request.target_nodes if n in model_output_nodes]) > 0:
+             Logger.critical("Activation Hessian approximation cannot be computed for model outputs. "
+                             "Exclude output nodes from Hessian request targets.")
+
+         grad_model_outputs = self.hessian_request.target_nodes + model_output_nodes
+         model, _ = FloatPyTorchModelBuilder(graph=self.graph, append2output=grad_model_outputs).build_model()
+         model.eval()
+
+         # Run model inference
+         # Set inputs to track gradients during inference
+         for input_tensor in self.input_images:
+             input_tensor.requires_grad_()
+             input_tensor.retain_grad()
+
+         outputs = model(*self.input_images)
+
+         if len(outputs) != len(grad_model_outputs):  # pragma: no cover
+             Logger.critical(f"Mismatch in expected and actual model outputs for activation Hessian approximation. "
+                             f"Expected {len(grad_model_outputs)} outputs, received {len(outputs)}.")
+
+         # Extracting the intermediate activation tensors and the model real output.
+         # Note that we do not allow computing Hessian for output nodes, so there shouldn't be an overlap.
+         num_target_nodes = len(self.hessian_request.target_nodes)
+         # Extract activation tensors of nodes for which we want to compute Hessian
+         target_activation_tensors = outputs[:num_target_nodes]
+         # Extract the model outputs
+         output_tensors = outputs[num_target_nodes:]
+         device = output_tensors[0].device
+
+         # Concat outputs
+         # First, we need to unfold all outputs that are given as list, to extract the actual output tensors
+         output = self.concat_tensors(output_tensors)
+         return output, target_activation_tensors
+
+     def _generate_random_vectors_batch(self, shape: tuple, distribution: HessianEstimationDistribution,
+                                        device: torch.device) -> torch.Tensor:
+         """
+         Generate a batch of random vectors for Hutchinson estimation
+
+         Args:
+             shape: target shape
+             distribution: distribution to sample from
+             device: target device
+
+         Returns:
+             Random tensor
+         """
+         if distribution == HessianEstimationDistribution.GAUSSIAN:
+             return torch.randn(shape, device=device)
+
+         if distribution == HessianEstimationDistribution.RADEMACHER:
+             v = torch.randint(high=2, size=shape, device=device)
+             v[v == 0] = -1
+             return v
+
+         raise ValueError(f'Unknown distribution {distribution}')  # pragma: no cover
+
+     def compute(self) -> List[np.ndarray]:
+         """
+         Compute the scores that are based on the approximation of the Hessian w.r.t the requested target nodes' activations.
+
+         Returns:
+             List[np.ndarray]: Scores based on the approximated Hessian for the requested nodes.
+         """
+         output, target_activation_tensors = self.forward_pass()
+
+         if self.hessian_request.granularity == HessianScoresGranularity.PER_TENSOR:
+             hessian_scores = self._compute_per_tensor(output, target_activation_tensors)
+         elif self.hessian_request.granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL:
+             hessian_scores = self._compute_per_channel(output, target_activation_tensors)
+         else:
+             raise NotImplementedError(f'{self.hessian_request.granularity} is not supported')  # pragma: no cover
+
+         # Convert results to list of numpy arrays
+         hessian_results = [torch_tensor_to_numpy(h) for h in hessian_scores]
+         return hessian_results
+
+     def _compute_per_tensor(self, output, target_activation_tensors):
+         assert self.hessian_request.granularity == HessianScoresGranularity.PER_TENSOR
+         ipts_hessian_approx_scores = [torch.tensor([0.0], requires_grad=True, device=output.device)
+                                       for _ in range(len(target_activation_tensors))]
+         prev_mean_results = None
+         for j in tqdm(range(self.num_iterations_for_approximation), "Hessian random iterations"):  # Approximation iterations
+             # Getting a random vector with normal distribution
+             v = self._generate_random_vectors_batch(output.shape, self.hessian_request.distribution, output.device)
+             f_v = torch.sum(v * output)
+             for i, ipt_tensor in enumerate(target_activation_tensors):  # Per Interest point activation tensor
+                 # Computing the hessian-approximation scores by getting the gradient of (output * v)
+                 hess_v = autograd.grad(outputs=f_v,
+                                        inputs=ipt_tensor,
+                                        retain_graph=True,
+                                        allow_unused=True)[0]
+
+                 if hess_v is None:
+                     # In case we have an output node, which is an interest point, but it is not differentiable,
+                     # we consider its Hessian to be the initial value 0.
+                     continue  # pragma: no cover
+
+                 # Mean over all dims but the batch (CXHXW for conv)
+                 hessian_approx_scores = torch.sum(hess_v ** 2.0, dim=tuple(d for d in range(1, len(hess_v.shape))))
+
+                 # Update node Hessian approximation mean over random iterations
+                 ipts_hessian_approx_scores[i] = (j * ipts_hessian_approx_scores[i] + hessian_approx_scores) / (j + 1)
+
+             # If the change to the maximal mean Hessian approximation is insignificant we stop the calculation
+             if j > MIN_HESSIAN_ITER:
+                 if prev_mean_results is not None:
+                     new_mean_res = torch.mean(torch.stack(ipts_hessian_approx_scores), dim=1)
+                     relative_delta_per_node = (torch.abs(new_mean_res - prev_mean_results) /
+                                                (torch.abs(new_mean_res) + 1e-6))
+                     max_delta = torch.max(relative_delta_per_node)
+                     if max_delta < HESSIAN_COMP_TOLERANCE:
+                         break
+             prev_mean_results = torch.mean(torch.stack(ipts_hessian_approx_scores), dim=1)
+
+         # add extra dimension to preserve previous behaviour
+         ipts_hessian_approx_scores = [torch.unsqueeze(t, -1) for t in ipts_hessian_approx_scores]
+         return ipts_hessian_approx_scores
+
+     def _compute_per_channel(self, output, target_activation_tensors):
+         assert self.hessian_request.granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL
+         ipts_hessian_approx_scores = [torch.tensor(0.0, requires_grad=True, device=output.device)
+                                       for _ in range(len(target_activation_tensors))]
+
+         for j in tqdm(range(self.num_iterations_for_approximation), "Hessian random iterations"):  # Approximation iterations
+             v = self._generate_random_vectors_batch(output.shape, self.hessian_request.distribution, output.device)
+             f_v = torch.sum(v * output)
+             for i, ipt_tensor in enumerate(target_activation_tensors):  # Per Interest point activation tensor
+                 hess_v = autograd.grad(outputs=f_v,
+                                        inputs=ipt_tensor,
+                                        retain_graph=True)[0]
+                 hessian_approx_scores = hess_v ** 2
+                 rank = len(hess_v.shape)
+                 if rank > 2:
+                     hessian_approx_scores = torch.mean(hessian_approx_scores, dim=tuple(range(2, rank)))
+
+                 # Update node Hessian approximation mean over random iterations
+                 ipts_hessian_approx_scores[i] = (j * ipts_hessian_approx_scores[i] + hessian_approx_scores) / (j + 1)
+
+         return ipts_hessian_approx_scores
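
The calculator above is a Hutchinson-style estimator: for a random vector v with identity covariance, the gradient of sum(v * output) with respect to an activation is one stochastic probe of the Hessian's action, and squaring and averaging those probes over iterations yields the score. A standalone PyTorch sketch of the two probe distributions it supports (illustrative only, not the package class):

    import torch

    def random_probe(shape, distribution: str, device=None) -> torch.Tensor:
        # Both choices satisfy E[v v^T] = I, which is all the Hutchinson estimator requires;
        # Rademacher probes typically have lower variance per iteration than Gaussian ones.
        if distribution == 'gaussian':
            return torch.randn(shape, device=device)
        if distribution == 'rademacher':
            v = torch.randint(high=2, size=shape, device=device)
            v[v == 0] = -1  # map {0, 1} to {-1, +1}
            return v
        raise ValueError(f'Unknown distribution {distribution}')

    v = random_probe((4, 10), 'rademacher')
    print(v.unique())  # tensor([-1, 1])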
@@ -17,6 +17,7 @@ from enum import Enum
  from typing import Callable, Any, Dict, Optional

  from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES, ACT_HESSIAN_DEFAULT_BATCH_SIZE
+ from model_compression_toolkit.core.common.hessian import HessianScoresGranularity, HessianEstimationDistribution
  from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT


@@ -39,17 +40,21 @@
      Configuration to use for computing the Hessian-based scores for GPTQ loss metric.

      Args:
-         hessians_num_samples (int): Number of samples to use for computing the Hessian-based scores.
+         hessians_num_samples (int|None): Number of samples to use for computing the Hessian-based scores.
+             If None, compute Hessian for all images.
          norm_scores (bool): Whether to normalize the returned scores of the weighted loss function (to get values between 0 and 1).
          log_norm (bool): Whether to use log normalization for the GPTQ Hessian-based scores.
          scale_log_norm (bool): Whether to scale the final vector of the Hessian-based scores.
          hessian_batch_size (int): The Hessian computation batch size. used only if using GPTQ with Hessian-based objective.
+         per_sample (bool): Whether to use per sample attention score.
      """
-     hessians_num_samples: int = GPTQ_HESSIAN_NUM_SAMPLES
+     hessians_num_samples: Optional[int] = GPTQ_HESSIAN_NUM_SAMPLES
      norm_scores: bool = True
      log_norm: bool = True
      scale_log_norm: bool = False
      hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE
+     per_sample: bool = False
+     estimator_distribution: HessianEstimationDistribution = HessianEstimationDistribution.GAUSSIAN


  @dataclass
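
A hedged example of the extended configuration, assuming this nightly build is installed; the field values are illustrative, and per_sample=True with the Rademacher estimator is the combination the sample-layer-attention path below consumes:

    from model_compression_toolkit.core.common.hessian import HessianEstimationDistribution
    from model_compression_toolkit.gptq.common.gptq_config import GPTQHessianScoresConfig

    # hessians_num_samples=None now means "compute the Hessian for all images".
    hessian_cfg = GPTQHessianScoresConfig(hessians_num_samples=None,
                                          hessian_batch_size=8,
                                          per_sample=True,
                                          estimator_distribution=HessianEstimationDistribution.RADEMACHER)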
@@ -13,6 +13,7 @@
  # limitations under the License.
  # ==============================================================================
  import copy
+ import hashlib
  from abc import ABC, abstractmethod
  import numpy as np
  from typing import Callable, List, Any, Dict
@@ -143,7 +144,11 @@ class GPTQTrainer(ABC):
              return np.asarray([1 / num_nodes for _ in range(num_nodes)])

          # Fetch hessian approximations for each target node
-         compare_point_to_hessian_approx_scores = self._fetch_hessian_approximations()
+         # TODO this smells like a potential bug. In hessian calculation target nodes are topo sorted and results are returned
+         # TODO also target nodes are replaced for reuse. Does this work correctly?
+         approximations = self._fetch_hessian_approximations(HessianScoresGranularity.PER_TENSOR)
+         compare_point_to_hessian_approx_scores = {node: score for node, score in zip(self.compare_points, approximations)}
+
          # Process the fetched hessian approximations to gather them per images
          hessian_approx_score_by_image = (
              self._process_hessian_approximations(compare_point_to_hessian_approx_scores))
@@ -172,29 +177,55 @@ class GPTQTrainer(ABC):
          # If log normalization is not enabled, return the mean of the approximations across images
          return np.mean(hessian_approx_score_by_image, axis=0)

-     def _fetch_hessian_approximations(self) -> Dict[BaseNode, List[List[float]]]:
+     def _compute_sample_layer_attention_scores(self, inputs_batch) -> Dict[str, Dict[BaseNode, np.ndarray]]:
+         """
+         Compute sample layer attention scores per image hash per layer.
+
+         Args:
+             inputs_batch: a list containing a batch of inputs.
+
+         Returns:
+             A dictionary with a structure {img_hash: {layer: score}}.
+
+         """
+         request = self._build_hessian_request(HessianScoresGranularity.PER_OUTPUT_CHANNEL)
+         hessian_batch_size = self.gptq_config.hessian_weights_config.hessian_batch_size
+
+         hessian_score_per_image_per_layer = {}
+         # If hessian batch is smaller than inputs batch, split it to hessian batches. If hessian batch is larger,
+         # it's currently ignored (TODO)
+         for i in range(0, inputs_batch[0].shape[0], hessian_batch_size):
+             inputs = [t[i: i+hessian_batch_size] for t in inputs_batch]
+             hessian_score_per_image_per_layer.update(
+                 self.hessian_service.compute_trackable_per_sample_hessian(request, inputs)
+             )
+         for img_hash, v in hessian_score_per_image_per_layer.items():
+             hessian_score_per_image_per_layer[img_hash] = {k: t.max(axis=0) for k, t in v.items()}
+         return hessian_score_per_image_per_layer
+
+     def _fetch_hessian_approximations(self, granularity: HessianScoresGranularity) -> Dict[BaseNode, List[List[float]]]:
          """
          Fetches hessian approximations for each target node.

          Returns:
              Mapping of target nodes to their hessian approximations.
          """
-         approximations = {}
-         hessian_scores_request = HessianScoresRequest(
-             mode=HessianMode.ACTIVATION,
-             granularity=HessianScoresGranularity.PER_TENSOR,
-             target_nodes=self.compare_points
-         )
+         hessian_scores_request = self._build_hessian_request(granularity)
+
          node_approximations = self.hessian_service.fetch_hessian(
              hessian_scores_request=hessian_scores_request,
              required_size=self.gptq_config.hessian_weights_config.hessians_num_samples,
              batch_size=self.gptq_config.hessian_weights_config.hessian_batch_size
          )
+         return node_approximations

-         for i, target_node in enumerate(self.compare_points):
-             approximations[target_node] = node_approximations[i]
-
-         return approximations
+     def _build_hessian_request(self, granularity):
+         return HessianScoresRequest(
+             mode=HessianMode.ACTIVATION,
+             granularity=granularity,
+             target_nodes=self.compare_points,
+             distribution=self.gptq_config.hessian_weights_config.estimator_distribution
+         )

      def _process_hessian_approximations(self, approximations: Dict[BaseNode, List[List[float]]]) -> List:
          """
@@ -13,8 +13,10 @@
  # limitations under the License.
  # ==============================================================================
  from typing import List
+
  import torch

+
  def mse_loss(y: torch.Tensor, x: torch.Tensor, normalized: bool = True) -> torch.Tensor:
      """
      Compute the MSE of two tensors.
@@ -25,7 +27,7 @@ def mse_loss(y: torch.Tensor, x: torch.Tensor, normalized: bool = True) -> torch
      Returns:
          The MSE of two tensors.
      """
-     loss = torch.nn.MSELoss()(x,y)
+     loss = torch.nn.MSELoss()(x, y)
      return loss / torch.mean(torch.square(x)) if normalized else loss


@@ -62,3 +64,36 @@ def multiple_tensors_mse_loss(y_list: List[torch.Tensor],
      else:
          return torch.mean(torch.stack(loss_values_list))

+
+ def sample_layer_attention_loss(y_list: List[torch.Tensor],
+                                 x_list: List[torch.Tensor],
+                                 fxp_w_list,
+                                 flp_w_list,
+                                 act_bn_mean,
+                                 act_bn_std,
+                                 loss_weights: torch.Tensor) -> torch.Tensor:
+     """
+     Compute Sample Layer Attention loss between two lists of tensors.
+
+     Args:
+         y_list: First list of tensors.
+         x_list: Second list of tensors.
+         fxp_w_list, flp_w_list, act_bn_mean, act_bn_std: unused (needed to comply with the interface).
+         loss_weights: layer-sample weights tensor of shape (layers, batch)
+
+     Returns:
+         Sample Layer Attention loss (a scalar).
+     """
+     loss = 0
+     layers_mean_w = []
+
+     for i, (y, x, w) in enumerate(zip(y_list, x_list, loss_weights)):
+         norm = (y - x).pow(2).sum(1)
+         if len(norm.shape) > 1:
+             norm = norm.flatten(1).mean(1)
+         loss += torch.mean(w * norm)
+         layers_mean_w.append(w.mean())
+
+     loss = loss / torch.stack(layers_mean_w).max()
+     return loss
+
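
To see how loss_weights enter, a toy computation that mirrors the arithmetic of sample_layer_attention_loss without importing the package; the shapes (two layers, three samples) and values are invented for illustration:

    import torch

    # Quantized vs. float activations for two layers, batch of three samples (toy data).
    y_list = [torch.randn(3, 4), torch.randn(3, 4, 2, 2)]
    x_list = [torch.randn(3, 4), torch.randn(3, 4, 2, 2)]
    loss_weights = torch.rand(2, 3)  # per-(layer, sample) attention weights

    loss, layer_means = 0.0, []
    for y, x, w in zip(y_list, x_list, loss_weights):
        norm = (y - x).pow(2).sum(1)        # squared error summed over the channel axis
        if norm.dim() > 1:
            norm = norm.flatten(1).mean(1)  # average any remaining spatial dims
        loss = loss + torch.mean(w * norm)  # each sample weighted by its attention score
        layer_means.append(w.mean())
    loss = loss / torch.stack(layer_means).max()  # normalize by the largest per-layer mean weight
    print(loss)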