mct-nightly 2.0.0.20240505.435.tar.gz → 2.0.0.20240507.417.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (496)
  1. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/PKG-INFO +1 -1
  2. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/mct_nightly.egg-info/PKG-INFO +1 -1
  3. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/__init__.py +1 -1
  4. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/base_graph.py +3 -3
  5. mct-nightly-2.0.0.20240507.417/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +247 -0
  6. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +1 -1
  7. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +1 -1
  8. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/pruner.py +1 -1
  9. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/pruning_info.py +1 -1
  10. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +2 -4
  11. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_config.py +2 -1
  12. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +1 -1
  13. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/remove_identity.py +4 -1
  14. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +3 -3
  15. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +1 -1
  16. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/default_framework_info.py +1 -1
  17. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +1 -1
  18. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +1 -1
  19. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  20. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +1 -1
  21. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +1 -1
  22. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/keras_implementation.py +4 -6
  23. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/keras_node_prior_info.py +1 -1
  24. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +11 -0
  25. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/common.py +4 -4
  26. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -15
  27. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +1 -1
  28. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +1 -1
  29. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  30. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +1 -1
  31. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +5 -2
  32. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +12 -0
  33. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/runner.py +12 -1
  34. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +10 -11
  35. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/optimization_utils.py +9 -9
  36. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/gptq_training.py +3 -38
  37. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +1 -4
  38. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +1 -3
  39. mct-nightly-2.0.0.20240505.435/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -164
  40. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/LICENSE.md +0 -0
  41. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/README.md +0 -0
  42. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/mct_nightly.egg-info/SOURCES.txt +0 -0
  43. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/mct_nightly.egg-info/dependency_links.txt +0 -0
  44. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/mct_nightly.egg-info/requires.txt +0 -0
  45. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/mct_nightly.egg-info/top_level.txt +0 -0
  46. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/constants.py +0 -0
  47. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/__init__.py +0 -0
  48. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/analyzer.py +0 -0
  49. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/__init__.py +0 -0
  50. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  51. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  52. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  53. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  54. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  55. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  56. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  57. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  58. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  59. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  60. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/framework_info.py +0 -0
  61. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  62. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  63. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  64. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  65. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  66. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  67. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  68. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  69. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  70. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  71. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  72. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  73. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  74. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  75. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  76. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  77. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  78. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  79. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  80. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
  81. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
  82. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  83. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  84. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  85. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  86. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  87. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  88. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  89. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  90. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  91. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  92. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  93. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  94. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  95. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  96. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  97. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  98. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  99. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  100. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
  101. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
  102. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
  103. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  104. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  105. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  106. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  107. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  108. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  109. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/model_collector.py +0 -0
  110. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/model_validation.py +0 -0
  111. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  112. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  113. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  114. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  115. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  116. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  117. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  118. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  119. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  120. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  121. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  122. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  123. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  124. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  125. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  126. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  127. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  128. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  129. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  130. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  131. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  132. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  133. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  134. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  135. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  136. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  137. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  138. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  139. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  140. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  141. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  142. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  143. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  144. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  145. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  146. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  147. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  148. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  149. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  150. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  151. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  152. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  153. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  154. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  155. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  156. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  157. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  158. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  159. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  160. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  161. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  162. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  163. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  164. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  165. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  166. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  167. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  168. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  169. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  170. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  171. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  172. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/user_info.py +0 -0
  173. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  174. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  175. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  176. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  177. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  178. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/__init__.py +0 -0
  179. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  180. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  181. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  182. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  183. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  184. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/constants.py +0 -0
  185. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  186. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  187. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  188. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  189. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  190. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  191. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  192. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  193. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  194. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  195. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  196. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  197. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  198. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  199. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  200. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  201. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  202. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  203. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  204. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
  205. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
  206. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  207. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  208. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  209. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  210. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  211. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  212. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  213. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  214. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  215. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  216. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  217. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  218. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  219. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  220. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  221. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  222. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  223. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  224. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  225. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  226. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  227. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  228. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  229. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  230. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  231. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  232. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  233. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  234. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  235. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  236. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  237. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  238. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  239. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  240. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  241. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  242. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  243. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  244. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  245. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  246. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  247. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  248. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  249. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  250. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
  251. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  252. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  253. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  254. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  255. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  256. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  257. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  258. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  259. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  260. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
  261. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
  262. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  263. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  264. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  265. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  266. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  267. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  268. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  269. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  270. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  271. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  272. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  273. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  274. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  275. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  276. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  277. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  278. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  279. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  280. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/__init__.py +0 -0
  281. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  282. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  283. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  284. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  285. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  286. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  287. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  288. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  289. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  290. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  291. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  292. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  293. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  294. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  295. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  296. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  297. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  298. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  299. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  300. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  301. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  302. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  303. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  304. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  305. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  306. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  307. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  308. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  309. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  310. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  311. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/defaultdict.py +0 -0
  312. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/__init__.py +0 -0
  313. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  314. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  315. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  316. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  317. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  318. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  319. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  320. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  321. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  322. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  323. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  324. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  325. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  326. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  327. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  328. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  329. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  330. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  331. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  332. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  333. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  334. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  335. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  336. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  337. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  338. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  339. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  340. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  341. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  342. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  343. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  344. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/__init__.py +0 -0
  345. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  346. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
  347. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  348. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  349. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  350. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  351. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  352. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  353. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  354. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  355. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
  356. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  357. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  358. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  359. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  360. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  361. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  362. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  363. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  364. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  365. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  366. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  367. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  368. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  369. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  370. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
  371. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  372. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
  373. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  374. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  375. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  376. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  377. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
  378. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  379. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  380. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  381. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  382. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  383. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  384. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/gptq/runner.py +0 -0
  385. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/logger.py +0 -0
  386. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/metadata.py +0 -0
  387. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/pruning/__init__.py +0 -0
  388. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  389. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  390. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  391. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  392. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/__init__.py +0 -0
  393. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  394. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  395. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  396. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  397. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/ptq/runner.py +0 -0
  398. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/__init__.py +0 -0
  399. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/common/__init__.py +0 -0
  400. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  401. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  402. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  403. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  404. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  405. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  406. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  407. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  408. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  409. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  410. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  411. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  412. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  413. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  414. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  415. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  416. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
  417. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  418. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  419. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  420. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  421. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
  422. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  423. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  424. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  425. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  426. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  427. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  428. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  429. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  430. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  431. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  432. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  433. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  434. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  435. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  436. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  437. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  438. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  439. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  440. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  441. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  442. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  443. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  444. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  445. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  446. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  447. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  448. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  449. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  450. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  451. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  452. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  453. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  454. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  455. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  456. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  457. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  458. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  459. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
  460. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
  461. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
  462. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
  463. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
  464. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
  465. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
  466. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
  467. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  468. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  469. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  470. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  471. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  472. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  473. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  474. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  475. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  476. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  477. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  478. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  479. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  480. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  481. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  482. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  483. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  484. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  485. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  486. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  487. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  488. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  489. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  490. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  491. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  492. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  493. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  494. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  495. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/setup.cfg +0 -0
  496. {mct-nightly-2.0.0.20240505.435 → mct-nightly-2.0.0.20240507.417}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.0.0.20240505.435
+ Version: 2.0.0.20240507.417
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.0.0.20240505.435
+ Version: 2.0.0.20240507.417
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.0.0.20240505.000435"
+ __version__ = "2.0.0.20240507.000417"
@@ -414,7 +414,7 @@ class Graph(nx.MultiDiGraph, GraphSearches):

  """
  if new_node is None:
- Logger.critical("Cannot replace input node with a None value; new input node is required.")
+ Logger.critical("Cannot replace input node with a None value; new input node is required.") # pragma: no cover

  graph_inputs = self.get_inputs()
  new_graph_inputs = copy(graph_inputs)
@@ -828,12 +828,12 @@ class Graph(nx.MultiDiGraph, GraphSearches):

  """
  if not fw_impl.is_node_entry_node(entry_node):
- Logger.critical(f"Node {entry_node} is not a valid entry node for creating a pruning section")
+ Logger.critical(f"Node {entry_node} is not a valid entry node for creating a pruning section") # pragma: no cover

  intermediate_nodes, exit_node = self._find_intermediate_and_exit_nodes(entry_node, fw_impl)

  if not fw_impl.is_node_exit_node(exit_node, entry_node, self.fw_info):
- Logger.critical(f"Node {exit_node} is not a valid exit node for the pruning section starting with {entry_node}.")
+ Logger.critical(f"Node {exit_node} is not a valid exit node for the pruning section starting with {entry_node}.") # pragma: no cover

  return PruningSection(entry_node=entry_node,
  intermediate_nodes=intermediate_nodes,
@@ -0,0 +1,247 @@
+ # Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+ import numpy as np
+ from typing import Callable, Any, Dict, Tuple
+
+ from model_compression_toolkit.constants import FLOAT_BITWIDTH, BITS_TO_BYTES
+ from model_compression_toolkit.core import FrameworkInfo, ResourceUtilization, CoreConfig
+ from model_compression_toolkit.core.common import Graph
+ from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
+ from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
+ from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner
+ from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, \
+ QuantizationConfigOptions
+
+
+ def compute_resource_utilization_data(in_model: Any,
+ representative_data_gen: Callable,
+ core_config: CoreConfig,
+ tpc: TargetPlatformCapabilities,
+ fw_info: FrameworkInfo,
+ fw_impl: FrameworkImplementation,
+ transformed_graph: Graph = None,
+ mixed_precision_enable: bool = True) -> ResourceUtilization:
+ """
+ Compute Resource Utilization information that can be relevant for defining target ResourceUtilization for mixed precision search.
+ Calculates maximal activation tensor size, the sum of the model's weight parameters and the total memory combining both weights
+ and maximal activation tensor size.
+
+ Args:
+ in_model: Model to build graph from (the model that intended to be quantized).
+ representative_data_gen: Dataset used for calibration.
+ core_config: CoreConfig containing parameters of how the model should be quantized.
+ tpc: TargetPlatformCapabilities object that models the inference target platform and
+ the attached framework operator's information.
+ fw_info: Information needed for quantization about the specific framework.
+ fw_impl: FrameworkImplementation object with a specific framework methods implementation.
+ transformed_graph: An internal graph representation of the input model. Defaults to None.
+ If no graph is provided, a graph will be constructed using the specified model.
+ mixed_precision_enable: Indicates if mixed precision is enabled, defaults to True.
+ If disabled, computes resource utilization using base quantization
+ configurations across all layers.
+
+ Returns:
+ ResourceUtilization: An object encapsulating the calculated resource utilization computations.
+
+
+ """
+
+ # We assume that the resource_utilization_data API is used to compute the model resource utilization for
+ # mixed precision scenario, so we run graph preparation under the assumption of enabled mixed precision.
+ if transformed_graph is None:
+ transformed_graph = graph_preparation_runner(in_model,
+ representative_data_gen,
+ core_config.quantization_config,
+ fw_info,
+ fw_impl,
+ tpc,
+ mixed_precision_enable=mixed_precision_enable)
+
+ # Compute parameters sum
+ weights_memory_bytes, weights_params = compute_nodes_weights_params(graph=transformed_graph, fw_info=fw_info)
+ total_weights_params = 0 if len(weights_params) == 0 else sum(weights_params)
+
+ # Compute max activation tensor
+ activation_output_sizes_bytes, activation_output_sizes = compute_activation_output_sizes(graph=transformed_graph)
+ max_activation_tensor_size = 0 if len(activation_output_sizes) == 0 else max(activation_output_sizes)
+
+ # Compute total memory utilization - parameters sum + max activation tensor
+ total_size = total_weights_params + max_activation_tensor_size
+
+ # Compute BOPS utilization - total count of bit-operations for all configurable layers with kernel
+ bops_count = compute_total_bops(graph=transformed_graph, fw_info=fw_info, fw_impl=fw_impl)
+ bops_count = np.inf if len(bops_count) == 0 else sum(bops_count)
+
+ return ResourceUtilization(weights_memory=total_weights_params,
+ activation_memory=max_activation_tensor_size,
+ total_memory=total_size,
+ bops=bops_count)
+
+
+ def compute_nodes_weights_params(graph: Graph, fw_info: FrameworkInfo) -> Tuple[np.ndarray, np.ndarray]:
+ """
+ Calculates the memory usage in bytes and the number of weight parameters for each node within a graph.
+ Memory calculations are based on the maximum bit-width used for quantization per node.
+
+ Args:
+ graph: A finalized Graph object, representing the model structure.
+ fw_info: FrameworkInfo object containing details about the specific framework's
+ quantization attributes for different layers' weights.
+
+ Returns:
+ A tuple containing two arrays:
+ - The first array represents the memory in bytes for each node's weights when quantized at the maximal bit-width.
+ - The second array represents the total number of weight parameters for each node.
+ """
+ weights_params = []
+ weights_memory_bytes = []
+ for n in graph.nodes:
+ # TODO: when enabling multiple attribute quantization by default (currently,
+ # only kernel quantization is enabled) we should include other attributes memory in the sum of all
+ # weights memory.
+ # When implementing this, we should just go over all attributes in the node instead of counting only kernels.
+ kernel_attr = fw_info.get_kernel_op_attributes(n.type)[0]
+ if kernel_attr is not None and not n.reuse:
+ kernel_candidates = n.get_all_weights_attr_candidates(kernel_attr)
+
+ if len(kernel_candidates) > 0 and any([c.enable_weights_quantization for c in kernel_candidates]):
+ max_weight_bits = max([kc.weights_n_bits for kc in kernel_candidates])
+ node_num_weights_params = 0
+ for attr in fw_info.get_kernel_op_attributes(n.type):
+ if attr is not None:
+ node_num_weights_params += n.get_weights_by_keys(attr).flatten().shape[0]
+
+ weights_params.append(node_num_weights_params)
+
+ # multiply num params by num bits and divide by BITS_TO_BYTES to convert from bits to bytes
+ weights_memory_bytes.append(node_num_weights_params * max_weight_bits / BITS_TO_BYTES)
+
+ return np.array(weights_memory_bytes), np.array(weights_params)
+
+ def compute_activation_output_sizes(graph: Graph) -> Tuple[np.ndarray, np.ndarray]:
+ """
+ Computes an array of the respective output tensor size and an array of the output tensor size in bytes for
+ each node.
+
+ Args:
+ graph: A finalized Graph object, representing the model structure.
+
+ Returns:
+ A tuple containing two arrays:
+ - The first array represents the size of each node's activation output tensor size in bytes,
+ calculated using the maximal bit-width for quantization.
+ - The second array represents the size of each node's activation output tensor size.
+
+
+ """
+
+ activation_outputs = []
+ activation_outputs_bytes = []
+ for n in graph.nodes:
+ # Go over all nodes that have configurable activation.
+ if n.has_activation_quantization_enabled_candidate():
+ # Fetch maximum bits required for quantizing activations
+ max_activation_bits = max([qc.activation_quantization_cfg.activation_n_bits for qc in n.candidates_quantization_cfg])
+ node_output_size = n.get_total_output_params()
+ activation_outputs.append(node_output_size)
+ # Calculate activation size in bytes and append to list
+ activation_outputs_bytes.append(node_output_size * max_activation_bits / BITS_TO_BYTES)
+
+ return np.array(activation_outputs_bytes), np.array(activation_outputs)
+
+
+ def compute_total_bops(graph: Graph, fw_info: FrameworkInfo, fw_impl: FrameworkImplementation) -> np.ndarray:
+ """
+ Computes a vector with the respective Bit-operations count for each configurable node that includes MAC operations.
+ The computation assumes that the graph is a representation of a float model, thus, BOPs computation uses 32-bit.
+
+ Args:
+ graph: Finalized Graph object.
+ fw_info: FrameworkInfo object about the specific framework
+ (e.g., attributes of different layers' weights to quantize).
+ fw_impl: FrameworkImplementation object with a specific framework methods implementation.
+
+ Returns: A vector of nodes' Bit-operations count.
+
+ """
+
+ bops = []
+
+ # Go over all configurable nodes that have kernels.
+ for n in graph.get_topo_sorted_nodes():
+ if n.has_kernel_weight_to_quantize(fw_info):
+ # If node doesn't have weights then its MAC count is 0, and we shouldn't consider it in the BOPS count.
+ incoming_edges = graph.incoming_edges(n, sort_by_attr=EDGE_SINK_INDEX)
+ assert len(incoming_edges) == 1, f"Can't compute BOPS metric for node {n.name} with multiple inputs."
+
+ node_mac = fw_impl.get_node_mac_operations(n, fw_info)
+
+ node_bops = (FLOAT_BITWIDTH ** 2) * node_mac
+ bops.append(node_bops)
+
+ return np.array(bops)
+
+
+ def requires_mixed_precision(in_model: Any,
+ target_resource_utilization: ResourceUtilization,
+ representative_data_gen: Callable,
+ core_config: CoreConfig,
+ tpc: TargetPlatformCapabilities,
+ fw_info: FrameworkInfo,
+ fw_impl: FrameworkImplementation) -> bool:
+ """
+ The function checks whether the model requires mixed precision to meet the requested target resource utilization.
+ This is determined by whether the target memory usage of the weights is less than the available memory,
+ the target maximum size of an activation tensor is less than the available memory,
+ and the target number of BOPs is less than the available BOPs.
+ If any of these conditions are met, the function returns True. Otherwise, it returns False.
+
+ Args:
+ in_model: The model to be evaluated.
+ target_resource_utilization: The resource utilization of the target device.
+ representative_data_gen: A function that generates representative data for the model.
+ core_config: CoreConfig containing parameters of how the model should be quantized.
+ tpc: TargetPlatformCapabilities object that models the inference target platform and
+ the attached framework operator's information.
+ fw_info: Information needed for quantization about the specific framework.
+ fw_impl: FrameworkImplementation object with a specific framework methods implementation.
+
+ Returns: A boolean indicating if mixed precision is needed.
+ """
+ is_mixed_precision = False
+ transformed_graph = graph_preparation_runner(in_model,
+ representative_data_gen,
+ core_config.quantization_config,
+ fw_info,
+ fw_impl,
+ tpc,
+ mixed_precision_enable=False)
+ # Compute max weights memory in bytes
+ weights_memory_by_layer_bytes, _ = compute_nodes_weights_params(transformed_graph, fw_info)
+ total_weights_memory_bytes = 0 if len(weights_memory_by_layer_bytes) == 0 else sum(weights_memory_by_layer_bytes)
+
+ # Compute max activation tensor in bytes
+ activation_output_sizes_bytes, _ = compute_activation_output_sizes(transformed_graph)
+ max_activation_tensor_size_bytes = 0 if len(activation_output_sizes_bytes) == 0 else max(activation_output_sizes_bytes)
+
+ # Compute BOPS utilization - total count of bit-operations for all configurable layers with kernel
+ bops_count = compute_total_bops(graph=transformed_graph, fw_info=fw_info, fw_impl=fw_impl)
+ bops_count = np.inf if len(bops_count) == 0 else sum(bops_count)
+
+ is_mixed_precision |= target_resource_utilization.weights_memory < total_weights_memory_bytes
+ is_mixed_precision |= target_resource_utilization.activation_memory < max_activation_tensor_size_bytes
+ is_mixed_precision |= target_resource_utilization.total_memory < total_weights_memory_bytes + max_activation_tensor_size_bytes
+ is_mixed_precision |= target_resource_utilization.bops < bops_count
+ return is_mixed_precision
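The new resource_utilization_data.py hunk above reduces to a simple aggregation: the sum of kernel parameters becomes `weights_memory`, the largest activation output tensor becomes `activation_memory`, their sum becomes `total_memory`, and each kernel node contributes `FLOAT_BITWIDTH ** 2 * MACs` to `bops` (32-bit, per the `compute_total_bops` docstring). The standalone sketch below mirrors that arithmetic and the `requires_mixed_precision`-style budget comparison; it does not call MCT, and the per-node numbers and the `target` budget are made up purely for illustration.

```python
# Hypothetical per-node statistics: (kernel_params, output_activation_params, mac_ops).
# These values are illustrative only and do not come from a real model.
FLOAT_BITWIDTH = 32  # float-model bit-width assumed by compute_total_bops

nodes = [
    (4_608, 32_768, 14_745_600),   # a Conv2D-like node
    (1_280, 10, 12_800),           # a Dense-like classifier head
]

weights_params = [w for w, _, _ in nodes]
activation_sizes = [a for _, a, _ in nodes]
bops_per_node = [(FLOAT_BITWIDTH ** 2) * mac for _, _, mac in nodes]

weights_memory = sum(weights_params)                # 5_888
activation_memory = max(activation_sizes)           # 32_768
total_memory = weights_memory + activation_memory   # 38_656
bops = sum(bops_per_node)                           # 1024 * (14_745_600 + 12_800) = 15_112_601_600

# requires_mixed_precision-style check: mixed precision is needed as soon as
# any target budget is smaller than the corresponding max-bit-width estimate.
target = {"weights_memory": 4_000, "activation_memory": 40_000,
          "total_memory": 50_000, "bops": float("inf")}
needs_mixed_precision = (
    target["weights_memory"] < weights_memory
    or target["activation_memory"] < activation_memory
    or target["total_memory"] < total_memory
    or target["bops"] < bops
)
print(needs_mixed_precision)  # True, because the weights budget (4_000) is exceeded
```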
@@ -73,7 +73,7 @@ class PerChannelMask:
  mask_indicator: The new value to set in the mask (either PRUNED or REMAINED).
  """
  if mask_indicator not in [MaskIndicator.PRUNED, MaskIndicator.REMAINED]:
- Logger.critical("Mask value must be either 'MaskIndicator.PRUNED' or 'MaskIndicator.REMAINED'")
+ Logger.critical("Mask value must be either 'MaskIndicator.PRUNED' or 'MaskIndicator.REMAINED'") # pragma: no cover
  self._mask[node][channel_idx] = mask_indicator.value

  def has_pruned_channel(self) -> bool:
@@ -79,7 +79,7 @@ class PerSIMDGroupMask:
  mask_indicator: The new value to set in the mask (either PRUNED or REMAINED).
  """
  if mask_indicator not in [MaskIndicator.PRUNED, MaskIndicator.REMAINED]:
- Logger.critical("Mask value must be either 'MaskIndicator.PRUNED' or 'MaskIndicator.REMAINED'")
+ Logger.critical("Mask value must be either 'MaskIndicator.PRUNED' or 'MaskIndicator.REMAINED'") # pragma: no cover

  # Update the SIMD group mask and corresponding per-channel mask
  self._mask_simd[node][group_index] = mask_indicator.value
@@ -92,7 +92,7 @@ class Pruner:
  mask_calculator.compute_mask()
  self.per_oc_mask = mask_calculator.get_mask()
  else:
- Logger.critical("Only GREEDY ChannelsFilteringStrategy is currently supported.")
+ Logger.critical("Only GREEDY ChannelsFilteringStrategy is currently supported.") # pragma: no cover

  Logger.info("Start pruning graph...")
  _pruned_graph = build_pruned_graph(self.float_graph,
@@ -76,7 +76,7 @@ def unroll_simd_scores_to_per_channel_scores(simd_scores: Dict[BaseNode, np.ndar
  """
  if simd_scores is None or simd_groups_indices is None:
  Logger.critical(f"Failed to find scores and indices to create unrolled scores for pruning information."
- f" Scores: {simd_scores}, Group indices: {simd_groups_indices}.")
+ f" Scores: {simd_scores}, Group indices: {simd_groups_indices}.") # pragma: no cover
  _scores = {}
  for node, groups_indices in simd_groups_indices.items():
  node_scores = simd_scores[node]
@@ -65,10 +65,8 @@ class BaseNodeQuantizationConfig(object):
  """
  Returns: String to display a NodeQuantizationConfig object.
  """
- repr_str = ''
- for k, v in self.__dict__.items():
- repr_str += f'{k}: {v}\n'
- return repr_str
+ # Used for debugging, thus no cover.
+ return ''.join(f'{k}: {v}\n' for k, v in self.__dict__.items()) # pragma: no cover


  class NodeActivationQuantizationConfig(BaseNodeQuantizationConfig):
@@ -124,7 +124,8 @@ class QuantizationConfig:
  self.concat_threshold_update = concat_threshold_update

  def __repr__(self):
- return str(self.__dict__)
+ # Used for debugging, thus no cover.
+ return str(self.__dict__) # pragma: no cover


  # Default quantization configuration the library use.
@@ -44,6 +44,6 @@ def get_weights_quantization_fn(weights_quantization_method: QuantizationMethod)
  quantizer_fn = lut_kmeans_quantizer
  else:
  Logger.critical(
- f"No quantizer function found for the specified quantization method: {weights_quantization_method}")
+ f"No quantizer function found for the specified quantization method: {weights_quantization_method}") # pragma: no cover

  return quantizer_fn
@@ -15,6 +15,7 @@

  from model_compression_toolkit.core.common.graph.base_graph import Graph, OutTensor
  from model_compression_toolkit.core.common.graph.base_node import BaseNode
+ from model_compression_toolkit.logger import Logger


  def remove_identity_node(graph: Graph,
@@ -36,7 +37,9 @@ def remove_identity_node(graph: Graph,

  # Ensure there is exactly one predecessor; otherwise, do nothing.
  if len(prev_identity_nodes) != 1:
- return graph
+ # We do not expect to get here.
+ Logger.error(f"Identity node {node} have {len(prev_identity_nodes)} inputs, while expected to have one. Skipping remove identity substitution.") # pragma: no cover
+ return graph # pragma: no cover

  graph_outputs = graph.get_outputs()
  for i, g_out in enumerate(graph_outputs):
@@ -25,9 +25,9 @@ if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.layers.core import TFOpLambda
  from keras.src.engine.base_layer import TensorFlowOpLayer, Layer
  else:
- from keras import Input
- from keras.layers.core import TFOpLambda
- from keras.engine.base_layer import TensorFlowOpLayer, Layer
+ from keras import Input # pragma: no cover
+ from keras.layers.core import TFOpLambda # pragma: no cover
+ from keras.engine.base_layer import TensorFlowOpLayer, Layer # pragma: no cover

  from typing import Any, Dict, List, Tuple, Callable
  from tensorflow.python.util.object_identity import Reference as TFReference
@@ -19,7 +19,7 @@ import tensorflow as tf
  if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.engine.base_layer import Layer
  else:
- from keras.engine.base_layer import Layer
+ from keras.engine.base_layer import Layer # pragma: no cover

  from keras.models import Model
  from mct_quantizers import KerasQuantizationWrapper, KerasActivationQuantizationHolder, QuantizationTarget
@@ -22,7 +22,7 @@ from packaging import version
  if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Conv2DTranspose, Softmax, ELU
  else:
- from keras.layers import Conv2D, DepthwiseConv2D, Dense, Conv2DTranspose, Softmax, ELU
+ from keras.layers import Conv2D, DepthwiseConv2D, Dense, Conv2DTranspose, Softmax, ELU # pragma: no cover

  from model_compression_toolkit.defaultdict import DefaultDict
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
@@ -66,7 +66,7 @@ class ActivationDecomposition(common.BaseSubstitution):
  if ACTIVATION not in op2d_node.framework_attr:
  Logger.warning(f'Op2d node {op2d_node.name} of type {op2d_node.type} is missing an "{ACTIVATION}"'
  f' attribute -> Skipping substitution ActivationDecomposition') # pragma: no cover
- return graph
+ return graph # pragma: no cover

  activation_node_name = op2d_node.name + '_post_activation'

@@ -104,7 +104,7 @@ def conv2d_collapsing_fn(first_node: BaseNode,

  return kernel_collapsed, bias_collapsed
  else:
- Logger.critical(f"Layer collapsing unsupported for combination: {first_node.type} and {second_node.type}.")
+ Logger.critical(f"Layer collapsing unsupported for combination: {first_node.type} and {second_node.type}.") # pragma: no cover


  def keras_linear_collapsing() -> Conv2DCollapsing:
@@ -62,7 +62,7 @@ def residual_collapsing_fn(first_node: BaseNode,

  return kernel
  else:
- Logger.critical(f"Residual collapsing is unsupported for {first_node.type} node types.")
+ Logger.critical(f"Residual collapsing is unsupported for {first_node.type} node types.") # pragma: no cover


  def keras_residual_collapsing() -> ResidualCollapsing:
@@ -22,7 +22,7 @@ import tensorflow as tf
  from tensorflow.python.keras.layers.core import TFOpLambda
  if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.layers import Activation, Conv2D, Dense, DepthwiseConv2D, ZeroPadding2D, Reshape, \
- GlobalAveragePooling2D, Dropout, ReLU, PReLU, ELU
+ GlobalAveragePooling2D, Dropout, ReLU, PReLU, ELU # pragma: no cover
  else:
  from tensorflow.keras.layers import Activation, Conv2D, Dense, DepthwiseConv2D, ZeroPadding2D, Reshape, \
  GlobalAveragePooling2D, Dropout, ReLU, PReLU, ELU
@@ -74,6 +74,6 @@ class TraceHessianCalculatorKeras(TraceHessianCalculator):
  concat_axis_dim = [o.shape[0] for o in _r_tensors]
  if not all(d == concat_axis_dim[0] for d in concat_axis_dim):
  Logger.critical(
- "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.")# pragma: no cover
+ "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.") # pragma: no cover

  return tf.concat(_r_tensors, axis=1)
@@ -51,13 +51,11 @@ from model_compression_toolkit.core.keras.statistics_correction.apply_second_mom
  from packaging import version

  if version.parse(tf.__version__) >= version.parse("2.13"):
- from keras.src.layers import Dense, Activation, Conv2D, DepthwiseConv2D, Conv2DTranspose, \
- Concatenate, Add
+ from keras.src.layers import Dense, Activation, Conv2D, DepthwiseConv2D, Conv2DTranspose, Concatenate, Add
  from keras.src.layers.core import TFOpLambda
  else:
- from keras.layers import Dense, Activation, Conv2D, DepthwiseConv2D, Conv2DTranspose, \
- Concatenate, Add
- from keras.layers.core import TFOpLambda
+ from keras.layers import Dense, Activation, Conv2D, DepthwiseConv2D, Conv2DTranspose, Concatenate, Add # pragma: no cover
+ from keras.layers.core import TFOpLambda # pragma: no cover

  from model_compression_toolkit.core import QuantizationConfig, FrameworkInfo, CoreConfig, MixedPrecisionQuantizationConfig
  from model_compression_toolkit.core import common
@@ -489,7 +487,7 @@ class KerasImplementation(FrameworkImplementation):
  fw_impl=self,
  num_iterations_for_approximation=num_iterations_for_approximation)
  else:
- Logger.critical(f"Unsupported Hessian mode for Keras: {trace_hessian_request.mode}.")
+ Logger.critical(f"Unsupported Hessian mode for Keras: {trace_hessian_request.mode}.") # pragma: no cover

  def is_output_node_compatible_for_hessian_score_computation(self,
  node: BaseNode) -> Any:
@@ -6,7 +6,7 @@ from packaging import version
  if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.layers import Activation, ReLU, BatchNormalization
  else:
- from keras.layers import Activation, ReLU, BatchNormalization
+ from keras.layers import Activation, ReLU, BatchNormalization # pragma: no cover

  from model_compression_toolkit.core import FrameworkInfo
  from model_compression_toolkit.core.common import BaseNode
@@ -24,6 +24,17 @@ from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.constants import THRESHOLD, SIGNED, RANGE_MIN, RANGE_MAX
  from model_compression_toolkit.core.common.quantization.quantizers.uniform_quantizers import threshold_is_power_of_two

+ ################################################################
+ ################################################################
+ # TODO:
+ # These quantizer functions are for internal use. They are currently
+ # used in some features like MP for activation and SNC (where
+ # inference in the framework is needed).
+ # It may worth considering removing these functions and use
+ # activation inferable quantizers in those features like we do
+ # in GPTQ.
+ ################################################################
+ ################################################################

  def quantizer_min_max_calculator(threshold: np.ndarray,
  num_bits: int,
@@ -24,10 +24,10 @@ if version.parse(tf.__version__) >= version.parse("2.13"):
  from keras.src.engine.functional import Functional
  from keras.src.engine.sequential import Sequential
  else:
- from keras.engine.input_layer import InputLayer
- from keras.engine.node import Node as KerasNode
- from keras.engine.functional import Functional
- from keras.engine.sequential import Sequential
+ from keras.engine.input_layer import InputLayer # pragma: no cover
+ from keras.engine.node import Node as KerasNode # pragma: no cover
+ from keras.engine.functional import Functional # pragma: no cover
+ from keras.engine.sequential import Sequential # pragma: no cover

  from model_compression_toolkit.logger import Logger
  from model_compression_toolkit.core.common.graph.base_node import BaseNode
@@ -38,18 +38,3 @@ def node_builder(n: BaseNode) -> Module:
  return node_instance


- # todo: remove. It is not used anymore
- def identity_wrapper(node: BaseNode,
- module: Module,
- include_activation_quantizers: bool):
- """
- A function which takes a computational graph node and a pytorch module and return an identity wrapping which return the layer itself
- Args:
- node: A node of mct graph.
- layer: A pytorch module
- include_activation_quantizers: bool flag.
- Returns: pytorch module
- """
- return module
-
-
@@ -27,7 +27,7 @@ from model_compression_toolkit.core.common.back2framework.base_model_builder imp
  from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
  from model_compression_toolkit.core.common.graph.functional_node import FunctionalNode
  from model_compression_toolkit.core.common.user_info import UserInformation
- from model_compression_toolkit.core.pytorch.back2framework.instance_builder import node_builder, identity_wrapper
+ from model_compression_toolkit.core.pytorch.back2framework.instance_builder import node_builder
  from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
  from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
  from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder
@@ -101,7 +101,7 @@ def conv2d_collapsing_fn(first_node: BaseNode,

  return kernel_collapsed, bias_collapsed
  else:
- Logger.critical(f"Layer collapsing is not supported for the combination of {first_node.type} and {second_node.type}.")
+ Logger.critical(f"Layer collapsing is not supported for the combination of {first_node.type} and {second_node.type}.") # pragma: no cover


  def pytorch_linear_collapsing() -> Conv2DCollapsing:
@@ -58,7 +58,7 @@ def residual_collapsing_fn(first_node: BaseNode,
  kernel[i, i, idxH, idxW] += 1
  return kernel
  else:
- Logger.critical(f"Residual collapsing not supported for node type: {first_node.type}")
+ Logger.critical(f"Residual collapsing not supported for node type: {first_node.type}") # pragma: no cover


  def pytorch_residual_collapsing() -> ResidualCollapsing:
@@ -65,6 +65,6 @@ class TraceHessianCalculatorPytorch(TraceHessianCalculator):
  concat_axis_dim = [o.shape[0] for o in _r_tensors]
  if not all(d == concat_axis_dim[0] for d in concat_axis_dim):
  Logger.critical(
- "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.")
+ "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.") # pragma: no cover

  return torch.concat(_r_tensors, dim=1)
@@ -87,13 +87,16 @@ class DeviceManager:
  device_index = int(device_name.split(':')[1])
  if device_index >= torch.cuda.device_count():
  return False, f"CUDA device index {device_index} out of range. Number of valid devices: {torch.cuda.device_count()}"
- except IndexError:
+ except Exception:
  # Handle cases where the device name is incorrectly formatted
  return False, "Invalid CUDA device format. Use 'cuda' or 'cuda:x' where x is the device index."

  return True, "Valid device"

- return True, "Valid device"
+ if CPU in device_name:
+ return True, "Valid device"
+
+ return False, "Invalid device"

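For reference, a standalone sketch of the device-name validation flow that the DeviceManager hunk above ends up with: a malformed `cuda:x` index now falls into the broader `except Exception` branch, explicit `cpu` names are accepted, and any other name is rejected instead of being reported as valid. The helper name `validate_device_name` and the parts of the method outside the hunk (the CUDA-availability check and the `cuda`/`:` prefix handling) are assumptions made only to keep the example self-contained.

```python
import torch

CPU = "cpu"  # assumption: stands in for the CPU constant referenced in the diff

def validate_device_name(device_name: str):
    """Return (is_valid, message), mirroring the post-change control flow."""
    if "cuda" in device_name:
        if not torch.cuda.is_available():
            return False, "CUDA is not available"
        if ":" in device_name:
            try:
                device_index = int(device_name.split(":")[1])
                if device_index >= torch.cuda.device_count():
                    return False, (f"CUDA device index {device_index} out of range. "
                                   f"Number of valid devices: {torch.cuda.device_count()}")
            except Exception:
                # Handle cases where the device name is incorrectly formatted (e.g. 'cuda:abc')
                return False, "Invalid CUDA device format. Use 'cuda' or 'cuda:x' where x is the device index."
        return True, "Valid device"
    if CPU in device_name:
        return True, "Valid device"
    return False, "Invalid device"

print(validate_device_name("cpu"))     # (True, 'Valid device')
print(validate_device_name("tpu:0"))   # (False, 'Invalid device')
```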