mct-nightly 2.0.0.20240522.420.tar.gz → 2.0.0.20240523.418.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (495)
  1. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/PKG-INFO +23 -23
  2. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/README.md +22 -22
  3. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/mct_nightly.egg-info/PKG-INFO +23 -23
  4. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/__init__.py +1 -1
  5. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/framework_implementation.py +6 -4
  6. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/base_node.py +6 -2
  7. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +10 -5
  8. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/keras_implementation.py +6 -4
  9. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +1 -2
  10. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +6 -4
  11. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/LICENSE.md +0 -0
  12. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/mct_nightly.egg-info/SOURCES.txt +0 -0
  13. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/mct_nightly.egg-info/dependency_links.txt +0 -0
  14. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/mct_nightly.egg-info/requires.txt +0 -0
  15. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/mct_nightly.egg-info/top_level.txt +0 -0
  16. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/constants.py +0 -0
  17. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/__init__.py +0 -0
  18. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/analyzer.py +0 -0
  19. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/__init__.py +0 -0
  20. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  21. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  22. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  23. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  24. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  25. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  26. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  27. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  28. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  29. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/framework_info.py +0 -0
  30. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  31. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  32. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  33. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  34. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  35. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  36. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  37. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  38. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  39. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  40. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  41. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  42. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  43. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  44. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  45. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  46. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  47. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  48. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  49. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
  50. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
  51. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  52. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  53. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  54. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  55. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  56. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  57. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  58. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  59. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  60. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  61. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  62. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  63. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  64. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  65. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  66. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  67. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py +0 -0
  68. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +0 -0
  69. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +0 -0
  70. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +0 -0
  71. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +0 -0
  72. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +0 -0
  73. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  74. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  75. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  76. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  77. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  78. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/model_collector.py +0 -0
  79. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/model_validation.py +0 -0
  80. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  81. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  82. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  83. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  84. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  85. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  86. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  87. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  88. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  89. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  90. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  91. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  92. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  93. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  94. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  95. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  96. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  97. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  98. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  99. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  100. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  101. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  102. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  103. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  104. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  105. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  106. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  107. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  108. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  109. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  110. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  111. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  112. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  113. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  114. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  115. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  116. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  117. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  118. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  119. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  120. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  121. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  122. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  123. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  124. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  125. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  126. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  127. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  128. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  129. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  130. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  131. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  132. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  133. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  134. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  135. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  136. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  137. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  138. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  139. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  140. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  141. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  142. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/remove_identity.py +0 -0
  143. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  144. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  145. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  146. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  147. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  148. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  149. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/user_info.py +0 -0
  150. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  151. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  152. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  153. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  154. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  155. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/__init__.py +0 -0
  156. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  157. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  158. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  159. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  160. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  161. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  162. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  163. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/constants.py +0 -0
  164. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  165. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  166. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  167. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  168. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  169. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  170. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  171. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  172. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  173. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  174. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  175. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  176. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  177. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  178. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  179. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_identity.py +0 -0
  180. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  181. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  182. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  183. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  184. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  185. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  186. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  187. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  188. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
  189. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +0 -0
  190. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
  191. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  192. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  193. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  194. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  195. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  196. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  197. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  198. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  199. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  200. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  201. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  202. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  203. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  204. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  205. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  206. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  207. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  208. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  209. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  210. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  211. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  212. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +0 -0
  213. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  214. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  215. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  216. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  217. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  218. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  219. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  220. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  221. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  222. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  223. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  224. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  225. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  226. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  227. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  228. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  229. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  230. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  231. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  232. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  233. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  234. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/concat_threshold_update.py +0 -0
  235. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  236. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  237. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  238. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  239. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  240. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
  241. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  242. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/remove_identity.py +0 -0
  243. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  244. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  245. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  246. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  247. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  248. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  249. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  250. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  251. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
  252. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +0 -0
  253. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
  254. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  255. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  256. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  257. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  258. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  259. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  260. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  261. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  262. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  263. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  264. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  265. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  266. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  267. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  268. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py +0 -0
  269. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  270. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  271. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  272. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  273. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/core/runner.py +0 -0
  274. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/__init__.py +0 -0
  275. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  276. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  277. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  278. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  279. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  280. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  281. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  282. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  283. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  284. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  285. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  286. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  287. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  288. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  289. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  290. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  291. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  292. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  293. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  294. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  295. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  296. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  297. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  298. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  299. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  300. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  301. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  302. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  303. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  304. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  305. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  306. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  307. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/defaultdict.py +0 -0
  308. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/__init__.py +0 -0
  309. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  310. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  311. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  312. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  313. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  314. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  315. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  316. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  317. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  318. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  319. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  320. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  321. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  322. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  323. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  324. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  325. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  326. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  327. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  328. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/__init__.py +0 -0
  329. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/fw_agnostic/get_inferable_quantizers.py +0 -0
  330. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  331. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  332. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  333. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  334. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  335. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  336. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  337. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  338. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  339. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  340. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/__init__.py +0 -0
  341. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  342. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/gptq_config.py +0 -0
  343. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  344. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  345. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  346. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  347. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  348. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  349. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  350. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/gptq_training.py +0 -0
  351. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  352. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantization_facade.py +0 -0
  353. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  354. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  355. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  356. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +0 -0
  357. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +0 -0
  358. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  359. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  360. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  361. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  362. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  363. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  364. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  365. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  366. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  367. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/gptq_training.py +0 -0
  368. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  369. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +0 -0
  370. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  371. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  372. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  373. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +0 -0
  374. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +0 -0
  375. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  376. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  377. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  378. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  379. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  380. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  381. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/gptq/runner.py +0 -0
  382. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/logger.py +0 -0
  383. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/metadata.py +0 -0
  384. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/pruning/__init__.py +0 -0
  385. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  386. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  387. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  388. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  389. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/__init__.py +0 -0
  390. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  391. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/keras/quantization_facade.py +0 -0
  392. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  393. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +0 -0
  394. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/ptq/runner.py +0 -0
  395. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/__init__.py +0 -0
  396. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/common/__init__.py +0 -0
  397. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  398. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  399. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  400. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  401. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  402. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  403. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  404. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  405. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  406. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  407. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  408. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  409. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  410. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  411. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  412. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  413. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
  414. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  415. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  416. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  417. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  418. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
  419. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  420. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  421. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  422. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  423. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  424. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  425. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  426. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  427. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  428. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  429. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  430. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  431. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  432. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  433. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  434. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  435. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  436. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  437. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  438. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  439. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  440. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  441. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  442. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  443. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  444. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  445. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  446. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  447. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  448. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  449. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  450. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  451. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  452. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  453. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  454. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  455. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  456. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/__init__.py +0 -0
  457. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tp_model.py +0 -0
  458. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_keras.py +0 -0
  459. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2/tpc_pytorch.py +0 -0
  460. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/__init__.py +0 -0
  461. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tp_model.py +0 -0
  462. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_keras.py +0 -0
  463. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v2_lut/tpc_pytorch.py +0 -0
  464. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  465. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  466. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  467. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  468. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  469. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  470. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  471. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  472. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  473. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  474. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  475. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  476. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  477. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  478. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  479. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  480. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  481. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  482. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  483. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  484. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  485. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  486. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  487. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  488. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  489. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  490. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  491. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  492. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  493. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  494. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/setup.cfg +0 -0
  495. {mct-nightly-2.0.0.20240522.420 → mct-nightly-2.0.0.20240523.418}/setup.py +0 -0
--- mct-nightly-2.0.0.20240522.420/PKG-INFO
+++ mct-nightly-2.0.0.20240523.418/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.0.0.20240522.420
+ Version: 2.0.0.20240523.418
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -12,7 +12,7 @@ Description: # Model Compression Toolkit (MCT)

  Specifically, this project aims to apply quantization to compress neural networks.

- <img src="docsrc/images/mct_block_diagram.svg" width="10000">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mct_block_diagram.svg" width="10000">

  MCT is developed by researchers and engineers working at Sony Semiconductor Israel.

@@ -20,12 +20,12 @@ Description: # Model Compression Toolkit (MCT)

  ## Table of Contents

- - [Getting Started](#getting-started)
- - [Supported features](#supported-features)
- - [Results](#results)
- - [Troubleshooting](#trouble-shooting)
- - [Contributions](#contributions)
- - [License](#license)
+ - [Getting Started](https://github.com/sony/model_optimization?tab=readme-ov-file#getting-started)
+ - [Supported features](https://github.com/sony/model_optimization?tab=readme-ov-file#supported-features)
+ - [Results](https://github.com/sony/model_optimization?tab=readme-ov-file#results)
+ - [Troubleshooting](https://github.com/sony/model_optimization?tab=readme-ov-file#trouble-shooting)
+ - [Contributions](https://github.com/sony/model_optimization?tab=readme-ov-file#contributions)
+ - [License](https://github.com/sony/model_optimization?tab=readme-ov-file#license)


  ## Getting Started
@@ -39,17 +39,17 @@ Description: # Model Compression Toolkit (MCT)
  pip install model-compression-toolkit
  ```

- For installing the nightly version or installing from source, refer to the [installation guide](INSTALLATION.md).
+ For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/sony/model_optimization/blob/main/INSTALLATION.md).


  ### Quick start & tutorials

  Explore the Model Compression Toolkit (MCT) through our tutorials,
- covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](tutorials/README.md)
+ covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](https://github.com/sony/model_optimization/blob/main/tutorials/README.md)
  for hands-on learning. For example:
- * [Keras MobileNetV2 post training quantization](tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
- * [Post training quantization with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
- * [Data Generation for ResNet18 with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).
+ * [Keras MobileNetV2 post training quantization](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
+ * [Post training quantization with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
+ * [Data Generation for ResNet18 with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).


  ### Supported Versions
@@ -73,15 +73,15 @@ Description: # Model Compression Toolkit (MCT)
  ## Supported Features
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:

- ### Data Generation [*](#experimental-features)
+ ### Data Generation [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
- You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
+ You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/data_generation/README.md)

  ### Quantization
  MCT supports different quantization methods:
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_post_training_quantization.html)
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_gradient_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_gradient_post_training_quantization.html)
- * Quantization-aware training (QAT) [*](#experimental-features)
+ * Quantization-aware training (QAT) [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)


  | Quantization Method | Complexity | Computational Cost |
@@ -103,20 +103,20 @@ Description: # Model Compression Toolkit (MCT)
  * <ins>Advanced quantization algorithms:</ins> To prevent a performance degradation some algorithms are applied such as:
  * <ins>Shift negative correction:</ins> Symmetric activation quantization can hurt the model's performance when some layers output both negative and positive activations, but their range is asymmetric. For more details please visit [1].
  * <ins>Outliers filtering:</ins> Computing z-score for activation statistics to detect and remove outliers.
- * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](#experimental-features)
+ * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  * <ins>Mixed-precision search:</ins> Assigning quantization bit-width per layer (for weights/activations), based on the layer's sensitivity to different bit-widths.
  * <ins>Visualization:</ins> You can use TensorBoard to observe useful information for troubleshooting the quantized model's performance (for example, the model in different phases of the quantization, collected statistics, similarity between layers of the float and quantized model and bit-width configuration for mixed-precision quantization). For more details, please read the [visualization documentation](https://sony.github.io/model_optimization/docs/guidelines/visualization.html).
- * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](model_compression_toolkit/target_platform_capabilities/README.md).
+ * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md).

  ### Enhanced Post-Training Quantization (EPTQ)
  As part of the GPTQ we provide an advanced optimization algorithm called EPTQ.

  The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian**"_ [4].

- More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
+ More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).


- ### Structured Pruning [*](#experimental-features)
+ ### Structured Pruning [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT introduces a structured and hardware-aware model pruning.
  This pruning technique is designed to compress models for specific hardware architectures,
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
@@ -138,7 +138,7 @@ Description: # Model Compression Toolkit (MCT)
  Graph of [MobileNetV2](https://keras.io/api/applications/mobilenet/) accuracy on ImageNet vs average bit-width of weights, using
  single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.

- <img src="docsrc/images/mbv2_accuracy_graph.png">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mbv2_accuracy_graph.png">

  For more results, please see [1]

@@ -174,11 +174,11 @@ Description: # Model Compression Toolkit (MCT)
  ## Contributions
  MCT aims at keeping a more up-to-date fork and welcomes contributions from anyone.

- *You will find more information about contributions in the [Contribution guide](CONTRIBUTING.md).
+ *You will find more information about contributions in the [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md).


  ## License
- [Apache License 2.0](LICENSE.md).
+ [Apache License 2.0](https://github.com/sony/model_optimization/blob/main/LICENSE.md).

  ## References

--- mct-nightly-2.0.0.20240522.420/README.md
+++ mct-nightly-2.0.0.20240523.418/README.md
@@ -6,7 +6,7 @@ This project provides researchers, developers, and engineers tools for optimizin

  Specifically, this project aims to apply quantization to compress neural networks.

- <img src="docsrc/images/mct_block_diagram.svg" width="10000">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mct_block_diagram.svg" width="10000">

  MCT is developed by researchers and engineers working at Sony Semiconductor Israel.

@@ -14,12 +14,12 @@ MCT is developed by researchers and engineers working at Sony Semiconductor Isra

  ## Table of Contents

- - [Getting Started](#getting-started)
- - [Supported features](#supported-features)
- - [Results](#results)
- - [Troubleshooting](#trouble-shooting)
- - [Contributions](#contributions)
- - [License](#license)
+ - [Getting Started](https://github.com/sony/model_optimization?tab=readme-ov-file#getting-started)
+ - [Supported features](https://github.com/sony/model_optimization?tab=readme-ov-file#supported-features)
+ - [Results](https://github.com/sony/model_optimization?tab=readme-ov-file#results)
+ - [Troubleshooting](https://github.com/sony/model_optimization?tab=readme-ov-file#trouble-shooting)
+ - [Contributions](https://github.com/sony/model_optimization?tab=readme-ov-file#contributions)
+ - [License](https://github.com/sony/model_optimization?tab=readme-ov-file#license)


  ## Getting Started
@@ -33,17 +33,17 @@ To install the latest stable release of MCT, run the following command:
  pip install model-compression-toolkit
  ```

- For installing the nightly version or installing from source, refer to the [installation guide](INSTALLATION.md).
+ For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/sony/model_optimization/blob/main/INSTALLATION.md).


  ### Quick start & tutorials

  Explore the Model Compression Toolkit (MCT) through our tutorials,
- covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](tutorials/README.md)
+ covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](https://github.com/sony/model_optimization/blob/main/tutorials/README.md)
  for hands-on learning. For example:
- * [Keras MobileNetV2 post training quantization](tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
- * [Post training quantization with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
- * [Data Generation for ResNet18 with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).
+ * [Keras MobileNetV2 post training quantization](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
+ * [Post training quantization with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
+ * [Data Generation for ResNet18 with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).


  ### Supported Versions
@@ -67,15 +67,15 @@ Currently, MCT is being tested on various Python, Pytorch and TensorFlow version
  ## Supported Features
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:

- ### Data Generation [*](#experimental-features)
+ ### Data Generation [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
- You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
+ You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/data_generation/README.md)

  ### Quantization
  MCT supports different quantization methods:
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_post_training_quantization.html)
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_gradient_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_gradient_post_training_quantization.html)
- * Quantization-aware training (QAT) [*](#experimental-features)
+ * Quantization-aware training (QAT) [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)


  | Quantization Method | Complexity | Computational Cost |
@@ -97,20 +97,20 @@ Main features:
  * <ins>Advanced quantization algorithms:</ins> To prevent a performance degradation some algorithms are applied such as:
  * <ins>Shift negative correction:</ins> Symmetric activation quantization can hurt the model's performance when some layers output both negative and positive activations, but their range is asymmetric. For more details please visit [1].
  * <ins>Outliers filtering:</ins> Computing z-score for activation statistics to detect and remove outliers.
- * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](#experimental-features)
+ * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  * <ins>Mixed-precision search:</ins> Assigning quantization bit-width per layer (for weights/activations), based on the layer's sensitivity to different bit-widths.
  * <ins>Visualization:</ins> You can use TensorBoard to observe useful information for troubleshooting the quantized model's performance (for example, the model in different phases of the quantization, collected statistics, similarity between layers of the float and quantized model and bit-width configuration for mixed-precision quantization). For more details, please read the [visualization documentation](https://sony.github.io/model_optimization/docs/guidelines/visualization.html).
- * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](model_compression_toolkit/target_platform_capabilities/README.md).
+ * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md).

  ### Enhanced Post-Training Quantization (EPTQ)
  As part of the GPTQ we provide an advanced optimization algorithm called EPTQ.

  The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian**"_ [4].

- More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
+ More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).


- ### Structured Pruning [*](#experimental-features)
+ ### Structured Pruning [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT introduces a structured and hardware-aware model pruning.
  This pruning technique is designed to compress models for specific hardware architectures,
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
@@ -132,7 +132,7 @@ For more details, we highly recommend visiting our project website where experim
  Graph of [MobileNetV2](https://keras.io/api/applications/mobilenet/) accuracy on ImageNet vs average bit-width of weights, using
  single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.

- <img src="docsrc/images/mbv2_accuracy_graph.png">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mbv2_accuracy_graph.png">

  For more results, please see [1]

@@ -168,11 +168,11 @@ Check out the [FAQ](https://github.com/sony/model_optimization/tree/main/FAQ.md)
  ## Contributions
  MCT aims at keeping a more up-to-date fork and welcomes contributions from anyone.

- *You will find more information about contributions in the [Contribution guide](CONTRIBUTING.md).
+ *You will find more information about contributions in the [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md).


  ## License
- [Apache License 2.0](LICENSE.md).
+ [Apache License 2.0](https://github.com/sony/model_optimization/blob/main/LICENSE.md).

  ## References

--- mct-nightly-2.0.0.20240522.420/mct_nightly.egg-info/PKG-INFO
+++ mct-nightly-2.0.0.20240523.418/mct_nightly.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.0.0.20240522.420
+ Version: 2.0.0.20240523.418
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -12,7 +12,7 @@ Description: # Model Compression Toolkit (MCT)

  Specifically, this project aims to apply quantization to compress neural networks.

- <img src="docsrc/images/mct_block_diagram.svg" width="10000">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mct_block_diagram.svg" width="10000">

  MCT is developed by researchers and engineers working at Sony Semiconductor Israel.

@@ -20,12 +20,12 @@ Description: # Model Compression Toolkit (MCT)

  ## Table of Contents

- - [Getting Started](#getting-started)
- - [Supported features](#supported-features)
- - [Results](#results)
- - [Troubleshooting](#trouble-shooting)
- - [Contributions](#contributions)
- - [License](#license)
+ - [Getting Started](https://github.com/sony/model_optimization?tab=readme-ov-file#getting-started)
+ - [Supported features](https://github.com/sony/model_optimization?tab=readme-ov-file#supported-features)
+ - [Results](https://github.com/sony/model_optimization?tab=readme-ov-file#results)
+ - [Troubleshooting](https://github.com/sony/model_optimization?tab=readme-ov-file#trouble-shooting)
+ - [Contributions](https://github.com/sony/model_optimization?tab=readme-ov-file#contributions)
+ - [License](https://github.com/sony/model_optimization?tab=readme-ov-file#license)


  ## Getting Started
@@ -39,17 +39,17 @@ Description: # Model Compression Toolkit (MCT)
  pip install model-compression-toolkit
  ```

- For installing the nightly version or installing from source, refer to the [installation guide](INSTALLATION.md).
+ For installing the nightly version or installing from source, refer to the [installation guide](https://github.com/sony/model_optimization/blob/main/INSTALLATION.md).


  ### Quick start & tutorials

  Explore the Model Compression Toolkit (MCT) through our tutorials,
- covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](tutorials/README.md)
+ covering compression techniques for Keras and PyTorch models. Access interactive [notebooks](https://github.com/sony/model_optimization/blob/main/tutorials/README.md)
  for hands-on learning. For example:
- * [Keras MobileNetV2 post training quantization](tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
- * [Post training quantization with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
- * [Data Generation for ResNet18 with PyTorch](tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).
+ * [Keras MobileNetV2 post training quantization](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/imx500_notebooks/keras/example_keras_mobilenetv2_for_imx500.ipynb)
+ * [Post training quantization with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_ptq_mnist.ipynb)
+ * [Data Generation for ResNet18 with PyTorch](https://github.com/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_data_generation.ipynb).


  ### Supported Versions
@@ -73,15 +73,15 @@ Description: # Model Compression Toolkit (MCT)
  ## Supported Features
  MCT offers a range of powerful features to optimize neural network models for efficient deployment. These supported features include:

- ### Data Generation [*](#experimental-features)
+ ### Data Generation [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT provides tools for generating synthetic images based on the statistics stored in a model's batch normalization layers. These generated images are valuable for various compression tasks where image data is required, such as quantization and pruning.
- You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](model_compression_toolkit/data_generation/README.md)
+ You can customize data generation configurations to suit your specific needs. [Go to the Data Generation page.](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/data_generation/README.md)

  ### Quantization
  MCT supports different quantization methods:
  * Post-training quantization (PTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_post_training_quantization.html)
  * Gradient-based post-training quantization (GPTQ): [Keras API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/keras_gradient_post_training_quantization.html), [PyTorch API](https://sony.github.io/model_optimization/docs/api/api_docs/methods/pytorch_gradient_post_training_quantization.html)
- * Quantization-aware training (QAT) [*](#experimental-features)
+ * Quantization-aware training (QAT) [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)


  | Quantization Method | Complexity | Computational Cost |
@@ -103,20 +103,20 @@ Description: # Model Compression Toolkit (MCT)
  * <ins>Advanced quantization algorithms:</ins> To prevent a performance degradation some algorithms are applied such as:
  * <ins>Shift negative correction:</ins> Symmetric activation quantization can hurt the model's performance when some layers output both negative and positive activations, but their range is asymmetric. For more details please visit [1].
  * <ins>Outliers filtering:</ins> Computing z-score for activation statistics to detect and remove outliers.
- * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](#experimental-features)
+ * <ins>Clustering:</ins> Using non-uniform quantization grid to quantize the weights and activations to match their distributions.[*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  * <ins>Mixed-precision search:</ins> Assigning quantization bit-width per layer (for weights/activations), based on the layer's sensitivity to different bit-widths.
  * <ins>Visualization:</ins> You can use TensorBoard to observe useful information for troubleshooting the quantized model's performance (for example, the model in different phases of the quantization, collected statistics, similarity between layers of the float and quantized model and bit-width configuration for mixed-precision quantization). For more details, please read the [visualization documentation](https://sony.github.io/model_optimization/docs/guidelines/visualization.html).
- * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](model_compression_toolkit/target_platform_capabilities/README.md).
+ * <ins>Target Platform Capabilities:</ins> The Target Platform Capabilities (TPC) describes the target platform (an edge device with dedicated hardware). For more details, please read the [TPC README](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/README.md).

  ### Enhanced Post-Training Quantization (EPTQ)
  As part of the GPTQ we provide an advanced optimization algorithm called EPTQ.

  The specifications of the algorithm are detailed in the paper: _"**EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian**"_ [4].

- More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](model_compression_toolkit/gptq/README.md).
+ More details on the how to use EPTQ via MCT can be found in the [EPTQ guidelines](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/gptq/README.md).


- ### Structured Pruning [*](#experimental-features)
+ ### Structured Pruning [*](https://github.com/sony/model_optimization?tab=readme-ov-file#experimental-features)
  MCT introduces a structured and hardware-aware model pruning.
  This pruning technique is designed to compress models for specific hardware architectures,
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
@@ -138,7 +138,7 @@ Description: # Model Compression Toolkit (MCT)
  Graph of [MobileNetV2](https://keras.io/api/applications/mobilenet/) accuracy on ImageNet vs average bit-width of weights, using
  single-precision quantization, mixed-precision quantization, and mixed-precision quantization with GPTQ.

- <img src="docsrc/images/mbv2_accuracy_graph.png">
+ <img src="https://github.com/sony/model_optimization/raw/main/docsrc/images/mbv2_accuracy_graph.png">

  For more results, please see [1]

@@ -174,11 +174,11 @@ Description: # Model Compression Toolkit (MCT)
  ## Contributions
  MCT aims at keeping a more up-to-date fork and welcomes contributions from anyone.

- *You will find more information about contributions in the [Contribution guide](CONTRIBUTING.md).
+ *You will find more information about contributions in the [Contribution guide](https://github.com/sony/model_optimization/blob/main/CONTRIBUTING.md).


  ## License
- [Apache License 2.0](LICENSE.md).
+ [Apache License 2.0](https://github.com/sony/model_optimization/blob/main/LICENSE.md).

  ## References

--- mct-nightly-2.0.0.20240522.420/model_compression_toolkit/__init__.py
+++ mct-nightly-2.0.0.20240523.418/model_compression_toolkit/__init__.py
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.0.0.20240522.000420"
+ __version__ = "2.0.0.20240523.000418"
--- mct-nightly-2.0.0.20240522.420/model_compression_toolkit/core/common/framework_implementation.py
+++ mct-nightly-2.0.0.20240523.418/model_compression_toolkit/core/common/framework_implementation.py
@@ -348,13 +348,14 @@ class FrameworkImplementation(ABC):
          raise NotImplemented(f'{self.__class__.__name__} have to implement the '
                               f'framework\'s count_node_for_mixed_precision_interest_points method.')  # pragma: no cover

-     def get_node_distance_fn(self, layer_class: type,
+     def get_mp_node_distance_fn(self, layer_class: type,
                               framework_attrs: Dict[str, Any],
                               compute_distance_fn: Callable = None,
-                              axis: int = None) -> Callable:
+                              axis: int = None,
+                              norm_mse: bool = False) -> Callable:
          """
          A mapping between layers' types and a distance function for computing the distance between
-         two tensors (for loss computation purposes). Returns a specific function if node of specific types is
+         two tensors in mixed precision (for loss computation purposes). Returns a specific function if node of specific types is
          given, or a default (normalized MSE) function otherwise.

          Args:
@@ -362,12 +363,13 @@
              framework_attrs: Framework attributes the layer had which the graph node holds.
              compute_distance_fn: An optional distance function to use globally for all nodes.
              axis: The axis on which the operation is preformed (if specified).
+             norm_mse: whether to normalize mse distance function.

          Returns: A distance function between two tensors.
          """

          raise NotImplemented(f'{self.__class__.__name__} have to implement the '
-                              f'framework\'s get_node_distance_fn method.')  # pragma: no cover
+                              f'framework\'s get_mp_node_distance_fn method.')  # pragma: no cover


      @abstractmethod
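The `norm_mse` flag added above selects between raw and normalized MSE as the default distance. Normalized MSE divides the raw error by the reference tensor's mean energy, so distances stay comparable across layers with very different activation scales. A minimal illustrative sketch (the epsilon guard and the exact normalization here are assumptions of this sketch, not necessarily MCT's `compute_mse`):

```python
import numpy as np

def compute_mse_sketch(float_tensor: np.ndarray, fxp_tensor: np.ndarray,
                       norm: bool = False, eps: float = 1e-8) -> float:
    # Plain mean squared error between the float and quantized tensors.
    mse = float(np.mean((float_tensor - fxp_tensor) ** 2))
    if norm:
        # Normalized MSE: divide by the reference tensor's mean energy so the
        # distance is insensitive to the layer's overall activation scale.
        mse /= float(np.mean(float_tensor ** 2)) + eps
    return mse
```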
--- mct-nightly-2.0.0.20240522.420/model_compression_toolkit/core/common/graph/base_node.py
+++ mct-nightly-2.0.0.20240523.418/model_compression_toolkit/core/common/graph/base_node.py
@@ -238,8 +238,12 @@
          """
          for pos, weight in sorted((pos, weight) for pos, weight in self.weights.items()
                                    if isinstance(pos, int)):
-             assert pos <= len(input_tensors), 'Positional weight index mismatch'
-             input_tensors.insert(pos, weight)
+             if pos > len(input_tensors):
+                 Logger.critical("The positional weight index cannot exceed the number of input tensors to the node.")  # pragma: no cover
+             # Insert only positional weights that are not subject to quantization. If the positional weight is
+             # subject to quantization, the quantization wrapper inserts the positional weight into the node.
+             if not self.is_weights_quantization_enabled(pos):
+                 input_tensors.insert(pos, weight)

          return input_tensors
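To make the new guard concrete, here is a self-contained sketch of the insertion logic above; `is_quantized` stands in for the node's `is_weights_quantization_enabled` check, and the plain-dict interface is an illustrative assumption:

```python
import numpy as np
from typing import Dict, List, Union

def insert_positional_weights(input_tensors: List[np.ndarray],
                              weights: Dict[Union[int, str], np.ndarray],
                              is_quantized: Dict[int, bool]) -> List[np.ndarray]:
    # Integer keys mark positional weights; string keys (named attributes) are
    # ignored. Ascending order keeps each insertion index valid as the list grows.
    for pos, weight in sorted((p, w) for p, w in weights.items() if isinstance(p, int)):
        if pos > len(input_tensors):
            raise ValueError('Positional weight index exceeds the number of input tensors.')
        # Quantized positional weights are inserted by the quantization wrapper,
        # so only non-quantized ones are inserted here.
        if not is_quantized.get(pos, False):
            input_tensors.insert(pos, weight)
    return input_tensors
```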
--- mct-nightly-2.0.0.20240522.420/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py
+++ mct-nightly-2.0.0.20240523.418/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py
@@ -89,10 +89,13 @@
              fw_impl.count_node_for_mixed_precision_interest_points,
              quant_config.num_interest_points_factor)

-         self.ips_distance_fns, self.ips_axis = self._init_metric_points_lists(self.interest_points)
+         # We use normalized MSE when not running hessian-based. For Hessian-based normalized MSE is not needed
+         # beacause hessian weights already do normalization.
+         use_normalized_mse = self.quant_config.use_hessian_based_scores is False
+         self.ips_distance_fns, self.ips_axis = self._init_metric_points_lists(self.interest_points, use_normalized_mse)

          self.output_points = get_output_nodes_for_metric(graph)
-         self.out_ps_distance_fns, self.out_ps_axis = self._init_metric_points_lists(self.output_points)
+         self.out_ps_distance_fns, self.out_ps_axis = self._init_metric_points_lists(self.output_points, use_normalized_mse)

          # Setting lists with relative position of the interest points
          # and output points in the list of all mp model activation tensors
@@ -128,7 +131,7 @@
              self.interest_points_hessians = self._compute_hessian_based_scores()
              self.quant_config.distance_weighting_method = lambda d: self.interest_points_hessians

-     def _init_metric_points_lists(self, points: List[BaseNode]) -> Tuple[List[Callable], List[int]]:
+     def _init_metric_points_lists(self, points: List[BaseNode], norm_mse: bool = False) -> Tuple[List[Callable], List[int]]:
          """
          Initiates required lists for future use when computing the sensitivity metric.
          Each point on which the metric is computed uses a dedicated distance function based on its type.
@@ -136,6 +139,7 @@

          Args:
              points: The set of nodes in the graph for which we need to initiate the lists.
+             norm_mse: whether to normalize mse distance function.

          Returns: A lists with distance functions and an axis list for each node.

@@ -144,11 +148,12 @@
          axis_list = []
          for n in points:
              axis = n.framework_attr.get(AXIS) if not isinstance(n, FunctionalNode) else n.op_call_kwargs.get(AXIS)
-             distance_fn = self.fw_impl.get_node_distance_fn(
+             distance_fn = self.fw_impl.get_mp_node_distance_fn(
                  layer_class=n.layer_class,
                  framework_attrs=n.framework_attr,
                  compute_distance_fn=self.quant_config.compute_distance_fn,
-                 axis=axis)
+                 axis=axis,
+                 norm_mse=norm_mse)
              distance_fns_list.append(distance_fn)
              # Axis is needed only for KL Divergence calculation, otherwise we use per-tensor computation
              axis_list.append(axis if distance_fn==compute_kl_divergence else None)
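One way to read the comment above ("hessian weights already do normalization"): the sensitivity metric aggregates per-point distances under a weighting method, and when that method returns per-point Hessian scores, each raw MSE term is already rescaled, so a second normalization would be redundant. A hedged sketch of such an aggregation (the exact reduction MCT applies may differ):

```python
import numpy as np
from typing import Callable

def weighted_sensitivity(distances: np.ndarray,
                         weighting: Callable[[np.ndarray], np.ndarray]) -> float:
    # distances: one raw distance per interest point.
    # weighting: returns a weight per point; with use_hessian_based_scores these
    # are the precomputed Hessian scores, which already rescale each term.
    w = weighting(distances)
    return float(np.sum(w * distances))

d = np.array([0.10, 0.02, 0.30])
print(weighted_sensitivity(d, lambda x: np.ones_like(x) / x.size))   # uniform weighting
print(weighted_sensitivity(d, lambda x: np.array([0.5, 0.2, 0.3])))  # fixed "Hessian-like" weights
```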
@@ -421,13 +421,14 @@ class KerasImplementation(FrameworkImplementation):
 
         return False
 
-    def get_node_distance_fn(self, layer_class: type,
+    def get_mp_node_distance_fn(self, layer_class: type,
                              framework_attrs: Dict[str, Any],
                              compute_distance_fn: Callable = None,
-                             axis: int = None) -> Callable:
+                             axis: int = None,
+                             norm_mse: bool = False) -> Callable:
         """
         A mapping between layers' types and a distance function for computing the distance between
-        two tensors (for loss computation purposes). Returns a specific function if node of specific types is
+        two tensors in mixed precision (for loss computation purposes). Returns a specific function if a node of a specific type is
         given, or a default (normalized MSE) function otherwise.
 
         Args:
@@ -435,6 +436,7 @@ class KerasImplementation(FrameworkImplementation):
             framework_attrs: Framework attributes the layer had which the graph node holds.
             compute_distance_fn: An optional distance function to use globally for all nodes.
             axis: The axis on which the operation is performed (if specified).
+            norm_mse: Whether to use the normalized MSE distance function.
 
         Returns: A distance function between two tensors.
         """
@@ -456,7 +458,7 @@ class KerasImplementation(FrameworkImplementation):
             return compute_cs
         elif layer_class == Dense:
             return compute_cs
-        return compute_mse
+        return partial(compute_mse, norm=norm_mse)
 
     def get_trace_hessian_calculator(self,
                                      graph: Graph,
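The partial(compute_mse, norm=norm_mse) return value keeps the caller's contract intact: the sensitivity loop still invokes a plain two-tensor callable, with the norm flag pre-bound. A self-contained sketch using a stand-in for compute_mse (functools.partial itself is stdlib):

    from functools import partial
    import numpy as np

    def compute_mse(f, q, norm=False, eps=1e-8):  # stand-in, not MCT's implementation
        d = float(np.mean((f - q) ** 2))
        return d / (float(np.mean(f ** 2)) + eps) if norm else d

    distance_fn = partial(compute_mse, norm=True)  # norm is bound here, once
    f, q = np.ones(4), np.full(4, 0.9)
    assert distance_fn(f, q) == compute_mse(f, q, norm=True)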
@@ -67,8 +67,7 @@ def _build_input_tensors_list(node: BaseNode,
             _input_tensors = node_to_output_tensors_dict[ie.source_node]
             input_tensors.append(_input_tensors)
     input_tensors = [tensor for tensor_list in input_tensors for tensor in tensor_list]  # flat list of lists
-    if not is_op_quantize_wrapper:
-        input_tensors = node.insert_positional_weights_to_input_list(input_tensors)
+    input_tensors = node.insert_positional_weights_to_input_list(input_tensors)
     # convert inputs from positional weights (numpy arrays) to tensors. Must handle each element in the
     # list separately, because in FX the tensors are FX objects and fail to_torch_tensor
     input_tensors = [to_torch_tensor(t, numpy_type=t.dtype) if isinstance(t, np.ndarray) else t
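Note why the is_op_quantize_wrapper guard could be dropped: the quantization check now lives inside insert_positional_weights_to_input_list itself (see the first hunk in this section), so the call is safe for wrapped and unwrapped ops alike. The element-wise conversion described in the trailing context lines can be sketched as follows (torch.from_numpy is real; the mixed list is contrived): only numpy entries are converted, so FX proxy objects pass through untouched.

    import numpy as np
    import torch

    def to_tensors(inputs: list) -> list:
        # Convert numpy positional weights one element at a time; non-numpy
        # entries (e.g. FX proxies) are passed through unchanged.
        return [torch.from_numpy(t) if isinstance(t, np.ndarray) else t for t in inputs]

    mixed = [np.zeros((2, 2), dtype=np.float32), "fx_proxy_placeholder"]
    print([type(x).__name__ for x in to_tensors(mixed)])  # ['Tensor', 'str']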
@@ -403,13 +403,14 @@ class PytorchImplementation(FrameworkImplementation):
             return True
         return False
 
-    def get_node_distance_fn(self, layer_class: type,
+    def get_mp_node_distance_fn(self, layer_class: type,
                              framework_attrs: Dict[str, Any],
                              compute_distance_fn: Callable = None,
-                             axis: int = None) -> Callable:
+                             axis: int = None,
+                             norm_mse: bool = False) -> Callable:
         """
         A mapping between layers' types and a distance function for computing the distance between
-        two tensors (for loss computation purposes). Returns a specific function if node of specific types is
+        two tensors in mixed precision (for loss computation purposes). Returns a specific function if a node of a specific type is
         given, or a default (normalized MSE) function otherwise.
 
         Args:
@@ -417,6 +418,7 @@ class PytorchImplementation(FrameworkImplementation):
             framework_attrs: Framework attributes the layer had which the graph node holds.
             compute_distance_fn: An optional distance function to use globally for all nodes.
             axis: The axis on which the operation is performed (if specified).
+            norm_mse: Whether to use the normalized MSE distance function.
 
         Returns: A distance function between two tensors.
         """
@@ -430,7 +432,7 @@ class PytorchImplementation(FrameworkImplementation):
             return compute_cs
         elif layer_class == Linear:
             return compute_cs
-        return compute_mse
+        return partial(compute_mse, norm=norm_mse)
 
     def is_output_node_compatible_for_hessian_score_computation(self,
                                                                 node: BaseNode) -> bool: