mct-nightly 1.11.0.20240305.post352__tar.gz → 1.11.0.20240306.post426__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (484)
  1. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/PKG-INFO +4 -4
  2. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/README.md +3 -4
  3. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/mct_nightly.egg-info/PKG-INFO +4 -4
  4. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/__init__.py +1 -1
  5. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/gptq_config.py +5 -72
  6. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/gptq_training.py +2 -2
  7. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantization_facade.py +15 -29
  8. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +3 -3
  9. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +2 -4
  10. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/gptq_training.py +2 -2
  11. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantization_facade.py +11 -28
  12. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +3 -3
  13. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +2 -4
  14. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/runner.py +3 -3
  15. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/__init__.py +2 -2
  16. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/keras/quantization_facade.py +9 -23
  17. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/pytorch/quantization_facade.py +9 -24
  18. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/setup.cfg +1 -1
  19. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/LICENSE.md +0 -0
  20. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/mct_nightly.egg-info/SOURCES.txt +0 -0
  21. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/mct_nightly.egg-info/dependency_links.txt +0 -0
  22. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/mct_nightly.egg-info/requires.txt +0 -0
  23. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/mct_nightly.egg-info/top_level.txt +0 -0
  24. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/__init__.py +0 -0
  25. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/constants.py +0 -0
  26. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/__init__.py +0 -0
  27. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/analyzer.py +0 -0
  28. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/__init__.py +0 -0
  29. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/back2framework/__init__.py +0 -0
  30. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/back2framework/base_model_builder.py +0 -0
  31. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/base_substitutions.py +0 -0
  32. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/__init__.py +0 -0
  33. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/base_collector.py +0 -0
  34. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/histogram_collector.py +0 -0
  35. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/mean_collector.py +0 -0
  36. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py +0 -0
  37. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/collectors/statistics_collector.py +0 -0
  38. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/data_loader.py +0 -0
  39. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/framework_implementation.py +0 -0
  40. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/framework_info.py +0 -0
  41. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/fusion/__init__.py +0 -0
  42. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/fusion/layer_fusing.py +0 -0
  43. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/__init__.py +0 -0
  44. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/base_graph.py +0 -0
  45. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/base_node.py +0 -0
  46. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/edge.py +0 -0
  47. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/functional_node.py +0 -0
  48. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/graph_matchers.py +0 -0
  49. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/graph_searches.py +0 -0
  50. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/__init__.py +0 -0
  51. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +0 -0
  52. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py +0 -0
  53. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/cut.py +0 -0
  54. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +0 -0
  55. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/memory_element.py +0 -0
  56. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/memory_graph/memory_graph.py +0 -0
  57. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +0 -0
  58. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/hessian/__init__.py +0 -0
  59. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/hessian/hessian_info_service.py +0 -0
  60. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/hessian/hessian_info_utils.py +0 -0
  61. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +0 -0
  62. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/hessian/trace_hessian_request.py +0 -0
  63. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/__init__.py +0 -0
  64. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/base_graph_filter.py +0 -0
  65. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/base_matcher.py +0 -0
  66. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/edge_matcher.py +0 -0
  67. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/function.py +0 -0
  68. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/node_matcher.py +0 -0
  69. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/matchers/walk_matcher.py +0 -0
  70. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/memory_computation.py +0 -0
  71. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/__init__.py +0 -0
  72. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +0 -0
  73. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py +0 -0
  74. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py +0 -0
  75. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/distance_weighting.py +0 -0
  76. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/__init__.py +0 -0
  77. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi.py +0 -0
  78. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_aggregation_methods.py +0 -0
  79. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_data.py +0 -0
  80. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py +0 -0
  81. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py +0 -0
  82. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py +0 -0
  83. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +0 -0
  84. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +0 -0
  85. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py +0 -0
  86. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +0 -0
  87. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +0 -0
  88. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py +0 -0
  89. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +0 -0
  90. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/model_builder_mode.py +0 -0
  91. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/model_collector.py +0 -0
  92. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/model_validation.py +0 -0
  93. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/network_editors/__init__.py +0 -0
  94. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/network_editors/actions.py +0 -0
  95. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/network_editors/edit_network.py +0 -0
  96. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/network_editors/node_filters.py +0 -0
  97. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/node_prior_info.py +0 -0
  98. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/__init__.py +0 -0
  99. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/channels_grouping.py +0 -0
  100. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +0 -0
  101. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/importance_metrics/__init__.py +0 -0
  102. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/importance_metrics/base_importance_metric.py +0 -0
  103. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/importance_metrics/importance_metric_factory.py +0 -0
  104. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +0 -0
  105. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/mask/__init__.py +0 -0
  106. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +0 -0
  107. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +0 -0
  108. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/memory_calculator.py +0 -0
  109. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/prune_graph.py +0 -0
  110. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/pruner.py +0 -0
  111. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/pruning_config.py +0 -0
  112. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/pruning_framework_implementation.py +0 -0
  113. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/pruning_info.py +0 -0
  114. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/pruning/pruning_section.py +0 -0
  115. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/__init__.py +0 -0
  116. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +0 -0
  117. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/core_config.py +0 -0
  118. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/debug_config.py +0 -0
  119. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +0 -0
  120. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/node_quantization_config.py +0 -0
  121. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_config.py +0 -0
  122. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +0 -0
  123. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +0 -0
  124. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/__init__.py +0 -0
  125. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +0 -0
  126. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +0 -0
  127. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/outlier_filter.py +0 -0
  128. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +0 -0
  129. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +0 -0
  130. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py +0 -0
  131. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py +0 -0
  132. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py +0 -0
  133. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +0 -0
  134. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +0 -0
  135. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantize_graph_weights.py +0 -0
  136. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantize_node.py +0 -0
  137. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantizers/__init__.py +0 -0
  138. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantizers/lut_kmeans_quantizer.py +0 -0
  139. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +0 -0
  140. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +0 -0
  141. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +0 -0
  142. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/similarity_analyzer.py +0 -0
  143. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/statistics_correction/__init__.py +0 -0
  144. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/statistics_correction/apply_bias_correction_to_graph.py +0 -0
  145. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/statistics_correction/apply_second_moment_correction_to_graph.py +0 -0
  146. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +0 -0
  147. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/statistics_correction/statistics_correction.py +0 -0
  148. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/__init__.py +0 -0
  149. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/apply_substitutions.py +0 -0
  150. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/batchnorm_folding.py +0 -0
  151. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +0 -0
  152. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +0 -0
  153. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/linear_collapsing.py +0 -0
  154. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/linear_collapsing_substitution.py +0 -0
  155. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/residual_collapsing.py +0 -0
  156. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/scale_equalization.py +0 -0
  157. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +0 -0
  158. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/softmax_shift.py +0 -0
  159. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +0 -0
  160. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/substitutions/weights_activation_split.py +0 -0
  161. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/user_info.py +0 -0
  162. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/visualization/__init__.py +0 -0
  163. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/visualization/final_config_visualizer.py +0 -0
  164. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/visualization/nn_visualizer.py +0 -0
  165. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/common/visualization/tensorboard_writer.py +0 -0
  166. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/exporter.py +0 -0
  167. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/graph_prep_runner.py +0 -0
  168. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/__init__.py +0 -0
  169. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/__init__.py +0 -0
  170. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +0 -0
  171. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/float_model_builder.py +0 -0
  172. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/instance_builder.py +0 -0
  173. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/keras_model_builder.py +0 -0
  174. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +0 -0
  175. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/back2framework/quantized_model_builder.py +0 -0
  176. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/constants.py +0 -0
  177. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/custom_layer_validation.py +0 -0
  178. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/default_framework_info.py +0 -0
  179. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/__init__.py +0 -0
  180. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/__init__.py +0 -0
  181. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/activation_decomposition.py +0 -0
  182. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  183. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  184. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  185. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/dwconv_to_conv.py +0 -0
  186. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +0 -0
  187. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  188. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +0 -0
  189. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  190. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  191. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/remove_relu_upper_bound.py +0 -0
  192. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  193. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/scale_equalization.py +0 -0
  194. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/separableconv_decomposition.py +0 -0
  195. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  196. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_shift.py +0 -0
  197. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  198. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  199. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/hessian/__init__.py +0 -0
  200. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +0 -0
  201. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +0 -0
  202. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +0 -0
  203. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/keras_implementation.py +0 -0
  204. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/keras_model_validation.py +0 -0
  205. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/keras_node_prior_info.py +0 -0
  206. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/kpi_data_facade.py +0 -0
  207. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/mixed_precision/__init__.py +0 -0
  208. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +0 -0
  209. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +0 -0
  210. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/pruning/__init__.py +0 -0
  211. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +0 -0
  212. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/quantizer/__init__.py +0 -0
  213. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/quantizer/base_quantizer.py +0 -0
  214. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +0 -0
  215. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/quantizer/lut_fake_quant.py +0 -0
  216. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/__init__.py +0 -0
  217. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/common.py +0 -0
  218. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/connectivity_handler.py +0 -0
  219. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/nested_model/__init__.py +0 -0
  220. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/nested_model/edges_merger.py +0 -0
  221. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/nested_model/nested_model_handler.py +0 -0
  222. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/nested_model/nodes_merger.py +0 -0
  223. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/nested_model/outputs_merger.py +0 -0
  224. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/node_builder.py +0 -0
  225. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/reader/reader.py +0 -0
  226. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/statistics_correction/__init__.py +0 -0
  227. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/statistics_correction/apply_second_moment_correction.py +0 -0
  228. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/tf_tensor_numpy.py +0 -0
  229. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/keras/visualization/__init__.py +0 -0
  230. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/__init__.py +0 -0
  231. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/__init__.py +0 -0
  232. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +0 -0
  233. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/float_model_builder.py +0 -0
  234. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/instance_builder.py +0 -0
  235. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +0 -0
  236. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py +0 -0
  237. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/__init__.py +0 -0
  238. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/quantized_layer_wrapper.py +0 -0
  239. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/quantization_wrapper/wrapper_quantize_config.py +0 -0
  240. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/back2framework/quantized_model_builder.py +0 -0
  241. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/constants.py +0 -0
  242. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/default_framework_info.py +0 -0
  243. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/__init__.py +0 -0
  244. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/__init__.py +0 -0
  245. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_folding.py +0 -0
  246. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_reconstruction.py +0 -0
  247. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/batchnorm_refusing.py +0 -0
  248. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +0 -0
  249. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +0 -0
  250. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_layer_norm.py +0 -0
  251. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +0 -0
  252. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +0 -0
  253. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/permute_call_method.py +0 -0
  254. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +0 -0
  255. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +0 -0
  256. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +0 -0
  257. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/scale_equalization.py +0 -0
  258. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/shift_negative_activation.py +0 -0
  259. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax_shift.py +0 -0
  260. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py +0 -0
  261. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py +0 -0
  262. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/hessian/__init__.py +0 -0
  263. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +0 -0
  264. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +0 -0
  265. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +0 -0
  266. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/kpi_data_facade.py +0 -0
  267. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/mixed_precision/__init__.py +0 -0
  268. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +0 -0
  269. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +0 -0
  270. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/pruning/__init__.py +0 -0
  271. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +0 -0
  272. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/pytorch_device_config.py +0 -0
  273. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/pytorch_implementation.py +0 -0
  274. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/pytorch_node_prior_info.py +0 -0
  275. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/quantizer/__init__.py +0 -0
  276. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/quantizer/fake_quant_builder.py +0 -0
  277. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/quantizer/lut_fake_quant.py +0 -0
  278. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/reader/__init__.py +0 -0
  279. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/reader/graph_builders.py +0 -0
  280. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/reader/node_holders.py +0 -0
  281. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/reader/reader.py +0 -0
  282. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/statistics_correction/__init__.py +0 -0
  283. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/statistics_correction/apply_second_moment_correction.py +0 -0
  284. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/pytorch/utils.py +0 -0
  285. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/quantization_prep_runner.py +0 -0
  286. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/core/runner.py +0 -0
  287. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/__init__.py +0 -0
  288. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/__init__.py +0 -0
  289. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/constants.py +0 -0
  290. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/data_generation.py +0 -0
  291. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/data_generation_config.py +0 -0
  292. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/enums.py +0 -0
  293. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/image_pipeline.py +0 -0
  294. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/model_info_exctractors.py +0 -0
  295. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/common/optimization_utils.py +0 -0
  296. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/__init__.py +0 -0
  297. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/constants.py +0 -0
  298. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/image_pipeline.py +0 -0
  299. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/keras_data_generation.py +0 -0
  300. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/model_info_exctractors.py +0 -0
  301. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/__init__.py +0 -0
  302. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/batchnorm_alignment_functions.py +0 -0
  303. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/bn_layer_weighting_functions.py +0 -0
  304. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +0 -0
  305. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +0 -0
  306. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_functions/scheduler_step_functions.py +0 -0
  307. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/keras/optimization_utils.py +0 -0
  308. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/__init__.py +0 -0
  309. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/constants.py +0 -0
  310. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/image_pipeline.py +0 -0
  311. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +0 -0
  312. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/__init__.py +0 -0
  313. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/batchnorm_alignment_functions.py +0 -0
  314. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/bn_layer_weighting_functions.py +0 -0
  315. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/image_initilization.py +0 -0
  316. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/output_loss_functions.py +0 -0
  317. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_functions/scheduler_step_functions.py +0 -0
  318. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/optimization_utils.py +0 -0
  319. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +0 -0
  320. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/defaultdict.py +0 -0
  321. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/__init__.py +0 -0
  322. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/__init__.py +0 -0
  323. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/__init__.py +0 -0
  324. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/exporter.py +0 -0
  325. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/fw_agonstic/quantization_format.py +0 -0
  326. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/__init__.py +0 -0
  327. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py +0 -0
  328. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py +0 -0
  329. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +0 -0
  330. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py +0 -0
  331. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py +0 -0
  332. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +0 -0
  333. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/keras/mctq_keras_exporter.py +0 -0
  334. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py +0 -0
  335. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/base_pytorch_exporter.py +0 -0
  336. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/export_serialization_format.py +0 -0
  337. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_onnx_pytorch_exporter.py +0 -0
  338. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/fakely_quant_torchscript_pytorch_exporter.py +0 -0
  339. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +0 -0
  340. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/__init__.py +0 -0
  341. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/__init__.py +0 -0
  342. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/__init__.py +0 -0
  343. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +0 -0
  344. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +0 -0
  345. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizers.py +0 -0
  346. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +0 -0
  347. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/__init__.py +0 -0
  348. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/__init__.py +0 -0
  349. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +0 -0
  350. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +0 -0
  351. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizers.py +0 -0
  352. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +0 -0
  353. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/__init__.py +0 -0
  354. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/gptq_constants.py +0 -0
  355. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/gptq_framework_implementation.py +0 -0
  356. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/gptq_graph.py +0 -0
  357. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/common/gptq_training.py +0 -0
  358. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/__init__.py +0 -0
  359. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/gptq_keras_implementation.py +0 -0
  360. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/gptq_loss.py +0 -0
  361. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/graph_info.py +0 -0
  362. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/__init__.py +0 -0
  363. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -0
  364. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/quant_utils.py +0 -0
  365. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/__init__.py +0 -0
  366. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  367. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  368. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  369. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py +0 -0
  370. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  371. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/__init__.py +0 -0
  372. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/gptq_loss.py +0 -0
  373. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py +0 -0
  374. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/graph_info.py +0 -0
  375. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +0 -0
  376. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -0
  377. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +0 -0
  378. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/__init__.py +0 -0
  379. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +0 -0
  380. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +0 -0
  381. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +0 -0
  382. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  383. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  384. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/logger.py +0 -0
  385. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/pruning/__init__.py +0 -0
  386. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/pruning/keras/__init__.py +0 -0
  387. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/pruning/keras/pruning_facade.py +0 -0
  388. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/pruning/pytorch/__init__.py +0 -0
  389. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/pruning/pytorch/pruning_facade.py +0 -0
  390. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/keras/__init__.py +0 -0
  391. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/pytorch/__init__.py +0 -0
  392. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/ptq/runner.py +0 -0
  393. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/__init__.py +0 -0
  394. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/common/__init__.py +0 -0
  395. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/common/qat_config.py +0 -0
  396. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/__init__.py +0 -0
  397. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantization_facade.py +0 -0
  398. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/__init__.py +0 -0
  399. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +0 -0
  400. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/lsq/__init__.py +0 -0
  401. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +0 -0
  402. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/lsq/uniform_lsq.py +0 -0
  403. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/quant_utils.py +0 -0
  404. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +0 -0
  405. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/__init__.py +0 -0
  406. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +0 -0
  407. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +0 -0
  408. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/__init__.py +0 -0
  409. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantization_facade.py +0 -0
  410. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/__init__.py +0 -0
  411. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +0 -0
  412. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/__init__.py +0 -0
  413. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +0 -0
  414. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +0 -0
  415. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +0 -0
  416. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/quantizer_utils.py +0 -0
  417. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/__init__.py +0 -0
  418. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +0 -0
  419. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +0 -0
  420. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/__init__.py +0 -0
  421. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/constants.py +0 -0
  422. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/immutable.py +0 -0
  423. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +0 -0
  424. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +0 -0
  425. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +0 -0
  426. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +0 -0
  427. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/operators.py +0 -0
  428. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +0 -0
  429. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model_component.py +0 -0
  430. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py +0 -0
  431. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +0 -0
  432. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +0 -0
  433. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py +0 -0
  434. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +0 -0
  435. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +0 -0
  436. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py +0 -0
  437. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/__init__.py +0 -0
  438. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py +0 -0
  439. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/__init__.py +0 -0
  440. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py +0 -0
  441. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/target_platform_capabilities.py +0 -0
  442. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/__init__.py +0 -0
  443. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py +0 -0
  444. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_keras.py +0 -0
  445. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc_pytorch.py +0 -0
  446. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/__init__.py +0 -0
  447. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tp_model.py +0 -0
  448. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_keras.py +0 -0
  449. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_lut/tpc_pytorch.py +0 -0
  450. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/__init__.py +0 -0
  451. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tp_model.py +0 -0
  452. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_keras.py +0 -0
  453. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1_pot/tpc_pytorch.py +0 -0
  454. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/__init__.py +0 -0
  455. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py +0 -0
  456. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/target_platform_capabilities.py +0 -0
  457. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/__init__.py +0 -0
  458. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py +0 -0
  459. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_keras.py +0 -0
  460. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc_pytorch.py +0 -0
  461. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/__init__.py +0 -0
  462. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py +0 -0
  463. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/target_platform_capabilities.py +0 -0
  464. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/__init__.py +0 -0
  465. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py +0 -0
  466. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_keras.py +0 -0
  467. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc_pytorch.py +0 -0
  468. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/__init__.py +0 -0
  469. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/__init__.py +0 -0
  470. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +0 -0
  471. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/constants.py +0 -0
  472. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +0 -0
  473. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +0 -0
  474. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/quant_utils.py +0 -0
  475. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py +0 -0
  476. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/__init__.py +0 -0
  477. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +0 -0
  478. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py +0 -0
  479. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/load_model.py +0 -0
  480. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +0 -0
  481. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py +0 -0
  482. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py +0 -0
  483. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +0 -0
  484. {mct-nightly-1.11.0.20240305.post352 → mct-nightly-1.11.0.20240306.post426}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mct-nightly
3
- Version: 1.11.0.20240305.post352
3
+ Version: 1.11.0.20240306.post426
4
4
  Summary: A Model Compression Toolkit for neural networks
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -152,10 +152,9 @@ Description: # Model Compression Toolkit (MCT)
152
152
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
153
153
  By pruning groups of channels (SIMD groups), our approach not only reduces model size
154
154
  and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
155
- for a target KPI of weights memory footprint.
155
+ for a target KPI of weights memory footprint.
156
156
  [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
157
- [Pytorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_pruning_experimental.html)
158
-
157
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
159
158
 
160
159
  #### Results
161
160
 
@@ -187,6 +186,7 @@ Description: # Model Compression Toolkit (MCT)
187
186
  [3] [TORCHVISION.MODELS](https://pytorch.org/vision/stable/models.html)
188
187
 
189
188
  [4] Gordon, O., Habi, H. V., & Netzer, A., 2023. [EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian. arXiv preprint](https://arxiv.org/abs/2309.11531)
189
+
190
190
  Platform: UNKNOWN
191
191
  Classifier: Programming Language :: Python :: 3
192
192
  Classifier: License :: OSI Approved :: Apache Software License
@@ -146,10 +146,9 @@ This pruning technique is designed to compress models for specific hardware arch
146
146
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
147
147
  By pruning groups of channels (SIMD groups), our approach not only reduces model size
148
148
  and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
149
- for a target KPI of weights memory footprint.
149
+ for a target KPI of weights memory footprint.
150
150
  [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
151
- [Pytorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_pruning_experimental.html)
152
-
151
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
153
152
 
154
153
  #### Results
155
154
 
@@ -180,4 +179,4 @@ MCT aims at keeping a more up-to-date fork and welcomes contributions from anyon
180
179
 
181
180
  [3] [TORCHVISION.MODELS](https://pytorch.org/vision/stable/models.html)
182
181
 
183
- [4] Gordon, O., Habi, H. V., & Netzer, A., 2023. [EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian. arXiv preprint](https://arxiv.org/abs/2309.11531)
182
+ [4] Gordon, O., Habi, H. V., & Netzer, A., 2023. [EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian. arXiv preprint](https://arxiv.org/abs/2309.11531)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mct-nightly
3
- Version: 1.11.0.20240305.post352
3
+ Version: 1.11.0.20240306.post426
4
4
  Summary: A Model Compression Toolkit for neural networks
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -152,10 +152,9 @@ Description: # Model Compression Toolkit (MCT)
152
152
  taking into account the target platform's Single Instruction, Multiple Data (SIMD) capabilities.
153
153
  By pruning groups of channels (SIMD groups), our approach not only reduces model size
154
154
  and complexity, but ensures that better utilization of channels is in line with the SIMD architecture
155
- for a target KPI of weights memory footprint.
155
+ for a target KPI of weights memory footprint.
156
156
  [Keras API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/keras_pruning_experimental.html)
157
- [Pytorch API](https://sony.github.io/model_optimization/docs/api/experimental_api_docs/methods/pytorch_pruning_experimental.html)
158
-
157
+ [Pytorch API](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/pruning/pytorch/pruning_facade.py#L43)
159
158
 
160
159
  #### Results
161
160
 
@@ -187,6 +186,7 @@ Description: # Model Compression Toolkit (MCT)
187
186
  [3] [TORCHVISION.MODELS](https://pytorch.org/vision/stable/models.html)
188
187
 
189
188
  [4] Gordon, O., Habi, H. V., & Netzer, A., 2023. [EPTQ: Enhanced Post-Training Quantization via Label-Free Hessian. arXiv preprint](https://arxiv.org/abs/2309.11531)
189
+
190
190
  Platform: UNKNOWN
191
191
  Classifier: Programming Language :: Python :: 3
192
192
  Classifier: License :: OSI Approved :: Apache Software License
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfigV2, GPTQHessianScoresConfig
16
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfig, GPTQHessianScoresConfig
17
17
  from model_compression_toolkit.gptq.keras.quantization_facade import keras_gradient_post_training_quantization
18
18
  from model_compression_toolkit.gptq.keras.quantization_facade import get_keras_gptq_config
19
19
  from model_compression_toolkit.gptq.pytorch.quantization_facade import pytorch_gradient_post_training_quantization
@@ -61,8 +61,8 @@ class GradientPTQConfig:
61
61
  """
62
62
  Configuration to use for quantization with GradientPTQ.
63
63
  """
64
-
65
- def __init__(self, n_iter: int,
64
+ def __init__(self,
65
+ n_epochs: int,
66
66
  optimizer: Any,
67
67
  optimizer_rest: Any = None,
68
68
  loss: Callable = None,
@@ -79,7 +79,7 @@ class GradientPTQConfig:
79
79
  Initialize a GradientPTQConfig.
80
80
 
81
81
  Args:
82
- n_iter (int): Number of iterations to train.
82
+ n_epochs (int): Number of representative dataset epochs to train.
83
83
  optimizer (Any): Optimizer to use.
84
84
  optimizer_rest (Any): Optimizer to use for bias and quantizer parameters.
85
85
  loss (Callable): The loss to use. should accept 6 lists of tensors. 1st list of quantized tensors, the 2nd list is the float tensors,
@@ -96,7 +96,8 @@ class GradientPTQConfig:
96
96
  gptq_quantizer_params_override (dict): A dictionary of parameters to override in GPTQ quantizer instantiation. Defaults to None (no parameters).
97
97
 
98
98
  """
99
- self.n_iter = n_iter
99
+
100
+ self.n_epochs = n_epochs
100
101
  self.optimizer = optimizer
101
102
  self.optimizer_rest = optimizer_rest
102
103
  self.loss = loss
@@ -114,71 +115,3 @@ class GradientPTQConfig:
114
115
  else gptq_quantizer_params_override
115
116
 
116
117
 
117
- class GradientPTQConfigV2(GradientPTQConfig):
118
- """
119
- Configuration to use for quantization with GradientPTQV2.
120
- """
121
- def __init__(self, n_epochs: int,
122
- optimizer: Any,
123
- optimizer_rest: Any = None,
124
- loss: Callable = None,
125
- log_function: Callable = None,
126
- train_bias: bool = True,
127
- rounding_type: RoundingType = RoundingType.SoftQuantizer,
128
- use_hessian_based_weights: bool = True,
129
- optimizer_quantization_parameter: Any = None,
130
- optimizer_bias: Any = None,
131
- regularization_factor: float = REG_DEFAULT,
132
- hessian_weights_config: GPTQHessianScoresConfig = GPTQHessianScoresConfig(),
133
- gptq_quantizer_params_override: Dict[str, Any] = None):
134
- """
135
- Initialize a GradientPTQConfigV2.
136
-
137
- Args:
138
- n_epochs (int): Number of representative dataset epochs to train.
139
- optimizer (Any): Optimizer to use.
140
- optimizer_rest (Any): Optimizer to use for bias and quantizer parameters.
141
- loss (Callable): The loss to use. should accept 6 lists of tensors. 1st list of quantized tensors, the 2nd list is the float tensors,
142
- the 3rd is a list of quantized weights, the 4th is a list of float weights, the 5th and 6th lists are the mean and std of the tensors
143
- accordingly. see example in multiple_tensors_mse_loss
144
- log_function (Callable): Function to log information about the GPTQ process.
145
- train_bias (bool): Whether to update the bias during the training or not.
146
- rounding_type (RoundingType): An enum that defines the rounding type.
147
- use_hessian_based_weights (bool): Whether to use Hessian-based weights for weighted average loss.
148
- optimizer_quantization_parameter (Any): Optimizer to override the rest optimizer for quantizer parameters.
149
- optimizer_bias (Any): Optimizer to override the rest optimizerfor bias.
150
- regularization_factor (float): A floating point number that defines the regularization factor.
151
- hessian_weights_config (GPTQHessianScoresConfig): A configuration that include all necessary arguments to run a computation of Hessian scores for the GPTQ loss.
152
- gptq_quantizer_params_override (dict): A dictionary of parameters to override in GPTQ quantizer instantiation. Defaults to None (no parameters).
153
-
154
- """
155
-
156
- super().__init__(n_iter=None,
157
- optimizer=optimizer,
158
- optimizer_rest=optimizer_rest,
159
- loss=loss,
160
- log_function=log_function,
161
- train_bias=train_bias,
162
- rounding_type=rounding_type,
163
- use_hessian_based_weights=use_hessian_based_weights,
164
- optimizer_quantization_parameter=optimizer_quantization_parameter,
165
- optimizer_bias=optimizer_bias,
166
- regularization_factor=regularization_factor,
167
- hessian_weights_config=hessian_weights_config,
168
- gptq_quantizer_params_override=gptq_quantizer_params_override)
169
- self.n_epochs = n_epochs
170
-
171
- @classmethod
172
- def from_v1(cls, n_ptq_iter: int, config_v1: GradientPTQConfig):
173
- """
174
- Initialize a GradientPTQConfigV2 from GradientPTQConfig instance.
175
-
176
- Args:
177
- n_ptq_iter (int): Number of PTQ calibration iters (length of representative dataset).
178
- config_v1 (GradientPTQConfig): A GPTQ config to convert to V2.
179
-
180
- """
181
- n_epochs = int(round(config_v1.n_iter) / n_ptq_iter)
182
- v1_params = config_v1.__dict__
183
- v1_params = {k: v for k, v in v1_params.items() if k != 'n_iter'}
184
- return cls(n_epochs, **v1_params)
@@ -37,7 +37,7 @@ else:
37
37
  from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
38
38
  from model_compression_toolkit.core import common
39
39
  from model_compression_toolkit.gptq.common.gptq_training import GPTQTrainer
40
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
40
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
41
41
  from model_compression_toolkit.core.common import Graph
42
42
  from model_compression_toolkit.gptq.keras.graph_info import get_weights_for_loss, get_gptq_trainable_parameters
43
43
  from model_compression_toolkit.gptq.keras.quantizer.regularization_factory import get_regularization
@@ -56,7 +56,7 @@ class KerasGPTQTrainer(GPTQTrainer):
56
56
  def __init__(self,
57
57
  graph_float: Graph,
58
58
  graph_quant: Graph,
59
- gptq_config: GradientPTQConfigV2,
59
+ gptq_config: GradientPTQConfig,
60
60
  fw_impl: FrameworkImplementation,
61
61
  fw_info: FrameworkInfo,
62
62
  representative_data_gen: Callable,
@@ -21,7 +21,7 @@ from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
21
21
  from model_compression_toolkit.logger import Logger
22
22
  from model_compression_toolkit.constants import TENSORFLOW, FOUND_TF
23
23
  from model_compression_toolkit.core.common.user_info import UserInformation
24
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
24
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
25
25
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
26
26
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
27
27
  from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
@@ -66,7 +66,7 @@ if FOUND_TF:
66
66
  loss: Callable = GPTQMultipleTensorsLoss(),
67
67
  log_function: Callable = None,
68
68
  use_hessian_based_weights: bool = True,
69
- regularization_factor: float = REG_DEFAULT) -> GradientPTQConfigV2:
69
+ regularization_factor: float = REG_DEFAULT) -> GradientPTQConfig:
70
70
  """
71
71
  Create a GradientPTQConfigV2 instance for Keras models.
72
72
 
@@ -102,26 +102,25 @@ if FOUND_TF:
102
102
  """
103
103
  bias_optimizer = tf.keras.optimizers.SGD(learning_rate=LR_BIAS_DEFAULT,
104
104
  momentum=GPTQ_MOMENTUM)
105
- return GradientPTQConfigV2(n_epochs,
106
- optimizer,
107
- optimizer_rest=optimizer_rest,
108
- loss=loss,
109
- log_function=log_function,
110
- train_bias=True,
111
- optimizer_bias=bias_optimizer,
112
- use_hessian_based_weights=use_hessian_based_weights,
113
- regularization_factor=regularization_factor)
105
+ return GradientPTQConfig(n_epochs,
106
+ optimizer,
107
+ optimizer_rest=optimizer_rest,
108
+ loss=loss,
109
+ log_function=log_function,
110
+ train_bias=True,
111
+ optimizer_bias=bias_optimizer,
112
+ use_hessian_based_weights=use_hessian_based_weights,
113
+ regularization_factor=regularization_factor)
114
114
 
115
115
 
116
116
  def keras_gradient_post_training_quantization(in_model: Model,
117
117
  representative_data_gen: Callable,
118
- gptq_config: GradientPTQConfigV2,
118
+ gptq_config: GradientPTQConfig,
119
119
  gptq_representative_data_gen: Callable = None,
120
120
  target_kpi: KPI = None,
121
121
  core_config: CoreConfig = CoreConfig(),
122
122
  fw_info: FrameworkInfo = DEFAULT_KERAS_INFO,
123
- target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC,
124
- new_experimental_exporter: bool = True) -> Tuple[Model, UserInformation]:
123
+ target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
125
124
  """
126
125
  Quantize a trained Keras model using post-training quantization. The model is quantized using a
127
126
  symmetric constraint quantization thresholds (power of two).
@@ -141,13 +140,12 @@ if FOUND_TF:
141
140
  Args:
142
141
  in_model (Model): Keras model to quantize.
143
142
  representative_data_gen (Callable): Dataset used for calibration.
144
- gptq_config (GradientPTQConfigV2): Configuration for using gptq (e.g. optimizer).
143
+ gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
145
144
  gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
146
145
  target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
147
146
  core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
148
147
  fw_info (FrameworkInfo): Information needed for quantization about the specific framework (e.g., kernel channels indices, groups of layers by how they should be quantized, etc.). `Default Keras info <https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/core/keras/default_framework_info.py>`_
149
148
  target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
150
- new_experimental_exporter (bool): Whether to wrap the quantized model using quantization information or not. Enabled by default. Experimental and subject to future changes.
151
149
 
152
150
  Returns:
153
151
 
@@ -232,19 +230,7 @@ if FOUND_TF:
232
230
  if core_config.debug_config.analyze_similarity:
233
231
  analyzer_model_quantization(representative_data_gen, tb_w, tg_gptq, fw_impl, fw_info)
234
232
 
235
- if new_experimental_exporter:
236
- Logger.warning('Using new experimental wrapped and ready for export models. To '
237
- 'disable it, please set new_experimental_exporter to False when '
238
- 'calling keras_gradient_post_training_quantization. '
239
- 'If you encounter an issue please file a bug.')
240
-
241
- return get_exportable_keras_model(tg_gptq)
242
-
243
- return export_model(tg_gptq,
244
- fw_info,
245
- fw_impl,
246
- tb_w,
247
- bit_widths_config)
233
+ return get_exportable_keras_model(tg_gptq)
248
234
 
249
235
  else:
250
236
  # If tensorflow is not installed,
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from typing import Dict, List, Tuple
16
16
 
17
- from model_compression_toolkit.gptq import GradientPTQConfigV2
17
+ from model_compression_toolkit.gptq import GradientPTQConfig
18
18
  from model_compression_toolkit.core import common
19
19
  from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
20
20
  from model_compression_toolkit.exporter.model_wrapper.keras.builder.node_to_quantizer import \
@@ -33,7 +33,7 @@ from model_compression_toolkit.trainable_infrastructure.common.get_quantizers im
33
33
 
34
34
 
35
35
  def quantization_builder(n: common.BaseNode,
36
- gptq_config: GradientPTQConfigV2
36
+ gptq_config: GradientPTQConfig
37
37
  ) -> Tuple[Dict[str, BaseKerasGPTQTrainableQuantizer], List[BaseKerasInferableQuantizer]]:
38
38
  """
39
39
  Build quantizers for a node according to its quantization configuration and
@@ -41,7 +41,7 @@ def quantization_builder(n: common.BaseNode,
41
41
 
42
42
  Args:
43
43
  n: Node to build its QuantizeConfig.
44
- gptq_config (GradientPTQConfigV2): GradientPTQConfigV2 configuration.
44
+ gptq_config (GradientPTQConfig): GradientPTQConfigV2 configuration.
45
45
 
46
46
  Returns:
47
47
  A dictionary which maps the weights kernel attribute to a quantizer for GPTQ training.
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from typing import Callable
16
16
 
17
- from model_compression_toolkit.gptq import RoundingType, GradientPTQConfigV2, GradientPTQConfig
17
+ from model_compression_toolkit.gptq import RoundingType, GradientPTQConfig, GradientPTQConfig
18
18
  from model_compression_toolkit.gptq.keras.quantizer.soft_rounding.soft_quantizer_reg import \
19
19
  SoftQuantizerRegularization
20
20
 
@@ -38,8 +38,6 @@ def get_regularization(gptq_config: GradientPTQConfig, representative_data_gen:
38
38
  for _ in representative_data_gen():
39
39
  num_batches += 1
40
40
 
41
- n_epochs = GradientPTQConfigV2.from_v1(n_ptq_iter=num_batches, config_v1=gptq_config).n_epochs if \
42
- not type(gptq_config) == GradientPTQConfigV2 else gptq_config.n_epochs
43
- return SoftQuantizerRegularization(total_gradient_steps=num_batches * n_epochs)
41
+ return SoftQuantizerRegularization(total_gradient_steps=num_batches * gptq_config.n_epochs)
44
42
  else:
45
43
  return lambda m, e_reg: 0
@@ -25,7 +25,7 @@ from model_compression_toolkit.logger import Logger
25
25
  from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
26
26
  from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
27
27
  from model_compression_toolkit.gptq.common.gptq_training import GPTQTrainer
28
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
28
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
29
29
  from model_compression_toolkit.core.common import Graph, BaseNode
30
30
  from model_compression_toolkit.core.common.framework_info import FrameworkInfo
31
31
  from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
@@ -46,7 +46,7 @@ class PytorchGPTQTrainer(GPTQTrainer):
46
46
  def __init__(self,
47
47
  graph_float: Graph,
48
48
  graph_quant: Graph,
49
- gptq_config: GradientPTQConfigV2,
49
+ gptq_config: GradientPTQConfig,
50
50
  fw_impl: FrameworkImplementation,
51
51
  fw_info: FrameworkInfo,
52
52
  representative_data_gen: Callable,
@@ -19,7 +19,7 @@ from model_compression_toolkit.core.common.visualization.tensorboard_writer impo
19
19
  from model_compression_toolkit.gptq.common.gptq_constants import REG_DEFAULT
20
20
  from model_compression_toolkit.logger import Logger
21
21
  from model_compression_toolkit.constants import PYTORCH
22
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
22
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
23
23
  from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
24
24
  from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
25
25
  from model_compression_toolkit.core.runner import core_runner
@@ -54,7 +54,7 @@ if FOUND_TORCH:
54
54
  loss: Callable = multiple_tensors_mse_loss,
55
55
  log_function: Callable = None,
56
56
  use_hessian_based_weights: bool = True,
57
- regularization_factor: float = REG_DEFAULT) -> GradientPTQConfigV2:
57
+ regularization_factor: float = REG_DEFAULT) -> GradientPTQConfig:
58
58
  """
59
59
  Create a GradientPTQConfigV2 instance for Pytorch models.
60
60
 
@@ -86,21 +86,19 @@ if FOUND_TORCH:
86
86
 
87
87
  """
88
88
  bias_optimizer = torch.optim.SGD([torch.Tensor([])], lr=LR_BIAS_DEFAULT, momentum=GPTQ_MOMENTUM)
89
- return GradientPTQConfigV2(n_epochs, optimizer, optimizer_rest=optimizer_rest, loss=loss,
90
- log_function=log_function, train_bias=True, optimizer_bias=bias_optimizer,
91
- use_hessian_based_weights=use_hessian_based_weights,
92
- regularization_factor=regularization_factor)
89
+ return GradientPTQConfig(n_epochs, optimizer, optimizer_rest=optimizer_rest, loss=loss,
90
+ log_function=log_function, train_bias=True, optimizer_bias=bias_optimizer,
91
+ use_hessian_based_weights=use_hessian_based_weights,
92
+ regularization_factor=regularization_factor)
93
93
 
94
94
 
95
95
  def pytorch_gradient_post_training_quantization(model: Module,
96
96
  representative_data_gen: Callable,
97
97
  target_kpi: KPI = None,
98
98
  core_config: CoreConfig = CoreConfig(),
99
- gptq_config: GradientPTQConfigV2 = None,
99
+ gptq_config: GradientPTQConfig = None,
100
100
  gptq_representative_data_gen: Callable = None,
101
- target_platform_capabilities: TargetPlatformCapabilities =
102
- DEFAULT_PYTORCH_TPC,
103
- new_experimental_exporter: bool = True):
101
+ target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
104
102
  """
105
103
  Quantize a trained Pytorch module using post-training quantization.
106
104
  By default, the module is quantized using a symmetric constraint quantization thresholds
@@ -122,10 +120,9 @@ if FOUND_TORCH:
122
120
  representative_data_gen (Callable): Dataset used for calibration.
123
121
  target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
124
122
  core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
125
- gptq_config (GradientPTQConfigV2): Configuration for using gptq (e.g. optimizer).
123
+ gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
126
124
  gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
127
125
  target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
128
- new_experimental_exporter (bool): Whether to wrap the quantized model using quantization information or not. Enabled by default. Experimental and subject to future changes.
129
126
 
130
127
  Returns:
131
128
  A quantized module and information the user may need to handle the quantized module.
@@ -194,22 +191,8 @@ if FOUND_TORCH:
194
191
  if core_config.debug_config.analyze_similarity:
195
192
  analyzer_model_quantization(representative_data_gen, tb_w, graph_gptq, fw_impl, DEFAULT_PYTORCH_INFO)
196
193
 
197
- # ---------------------- #
198
- # Export
199
- # ---------------------- #
200
- if new_experimental_exporter:
201
- Logger.warning('Using new experimental wrapped and ready for export models. To '
202
- 'disable it, please set new_experimental_exporter to False when '
203
- 'calling pytorch_gradient_post_training_quantization_experimental. '
204
- 'If you encounter an issue please file a bug.')
205
-
206
- return get_exportable_pytorch_model(graph_gptq)
207
-
208
- return export_model(graph_gptq,
209
- DEFAULT_PYTORCH_INFO,
210
- fw_impl,
211
- tb_w,
212
- bit_widths_config)
194
+ return get_exportable_pytorch_model(graph_gptq)
195
+
213
196
 
214
197
  else:
215
198
  # If torch is not installed,
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from typing import List, Dict, Tuple
16
16
 
17
- from model_compression_toolkit.gptq import GradientPTQConfigV2
17
+ from model_compression_toolkit.gptq import GradientPTQConfig
18
18
  from model_compression_toolkit.core import common
19
19
  from model_compression_toolkit.core.pytorch.constants import KERNEL
20
20
  from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.node_to_quantizer import \
@@ -34,7 +34,7 @@ from model_compression_toolkit.trainable_infrastructure.common.get_quantizers im
34
34
 
35
35
 
36
36
  def quantization_builder(n: common.BaseNode,
37
- gptq_config: GradientPTQConfigV2,
37
+ gptq_config: GradientPTQConfig,
38
38
  ) -> Tuple[Dict[str, BasePytorchQATTrainableQuantizer],
39
39
  List[BasePyTorchInferableQuantizer]]:
40
40
  """
@@ -43,7 +43,7 @@ def quantization_builder(n: common.BaseNode,
43
43
 
44
44
  Args:
45
45
  n: Node to build its QuantizeConfig.
46
- gptq_config (GradientPTQConfigV2): GradientPTQConfigV2 configuration.
46
+ gptq_config (GradientPTQConfig): GradientPTQConfigV2 configuration.
47
47
 
48
48
  Returns:
49
49
  A dictionary which maps the weights kernel attribute to a quantizer for GPTQ training.
@@ -14,7 +14,7 @@
14
14
  # ==============================================================================
15
15
  from typing import Callable
16
16
 
17
- from model_compression_toolkit.gptq import RoundingType, GradientPTQConfigV2, GradientPTQConfig
17
+ from model_compression_toolkit.gptq import RoundingType, GradientPTQConfig, GradientPTQConfig
18
18
  from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.soft_quantizer_reg import \
19
19
  SoftQuantizerRegularization
20
20
 
@@ -38,8 +38,6 @@ def get_regularization(gptq_config: GradientPTQConfig, representative_data_gen:
38
38
  for _ in representative_data_gen():
39
39
  num_batches += 1
40
40
 
41
- n_epochs = GradientPTQConfigV2.from_v1(n_ptq_iter=num_batches, config_v1=gptq_config).n_epochs if \
42
- not type(gptq_config) == GradientPTQConfigV2 else gptq_config.n_epochs
43
- return SoftQuantizerRegularization(total_gradient_steps=num_batches * n_epochs)
41
+ return SoftQuantizerRegularization(total_gradient_steps=num_batches * gptq_config.n_epochs)
44
42
  else:
45
43
  return lambda m, e_reg: 0
@@ -20,7 +20,7 @@ from model_compression_toolkit.core import common
20
20
  from model_compression_toolkit.core.common.hessian import HessianInfoService
21
21
  from model_compression_toolkit.core.common.statistics_correction.statistics_correction import \
22
22
  apply_statistics_correction
23
- from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
23
+ from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
24
24
  from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
25
25
  from model_compression_toolkit.core.common import FrameworkInfo
26
26
  from model_compression_toolkit.core.common.graph.base_graph import Graph
@@ -32,7 +32,7 @@ from model_compression_toolkit.core.common.statistics_correction.apply_bias_corr
32
32
  from model_compression_toolkit.logger import Logger
33
33
 
34
34
 
35
- def _apply_gptq(gptq_config: GradientPTQConfigV2,
35
+ def _apply_gptq(gptq_config: GradientPTQConfig,
36
36
  representative_data_gen: Callable,
37
37
  tb_w: TensorboardWriter,
38
38
  tg: Graph,
@@ -74,7 +74,7 @@ def _apply_gptq(gptq_config: GradientPTQConfigV2,
74
74
 
75
75
  def gptq_runner(tg: Graph,
76
76
  core_config: CoreConfig,
77
- gptq_config: GradientPTQConfigV2,
77
+ gptq_config: GradientPTQConfig,
78
78
  representative_data_gen: Callable,
79
79
  gptq_representative_data_gen: Callable,
80
80
  fw_info: FrameworkInfo,
@@ -13,5 +13,5 @@
13
13
  # limitations under the License.
14
14
  # ==============================================================================
15
15
 
16
- from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization_experimental
17
- from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization_experimental
16
+ from model_compression_toolkit.ptq.pytorch.quantization_facade import pytorch_post_training_quantization
17
+ from model_compression_toolkit.ptq.keras.quantization_facade import keras_post_training_quantization
@@ -40,12 +40,11 @@ if FOUND_TF:
40
40
  DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
41
41
 
42
42
 
43
- def keras_post_training_quantization_experimental(in_model: Model,
44
- representative_data_gen: Callable,
45
- target_kpi: KPI = None,
46
- core_config: CoreConfig = CoreConfig(),
47
- target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC,
48
- new_experimental_exporter: bool = True):
43
+ def keras_post_training_quantization(in_model: Model,
44
+ representative_data_gen: Callable,
45
+ target_kpi: KPI = None,
46
+ core_config: CoreConfig = CoreConfig(),
47
+ target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
49
48
  """
50
49
  Quantize a trained Keras model using post-training quantization. The model is quantized using a
51
50
  symmetric constraint quantization thresholds (power of two).
@@ -65,7 +64,6 @@ if FOUND_TF:
65
64
  target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
66
65
  core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
67
66
  target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
68
- new_experimental_exporter (bool): Whether to wrap the quantized model using quantization information or not. Enabled by default. Experimental and subject to future changes.
69
67
 
70
68
  Returns:
71
69
 
@@ -111,7 +109,7 @@ if FOUND_TF:
111
109
  Pass the model, the representative dataset generator, the configuration and the target KPI to get a
112
110
  quantized model:
113
111
 
114
- >>> quantized_model, quantization_info = mct.ptq.keras_post_training_quantization_experimental(model, repr_datagen, kpi, core_config=config)
112
+ >>> quantized_model, quantization_info = mct.ptq.keras_post_training_quantization(model, repr_datagen, kpi, core_config=config)
115
113
 
116
114
  For more configuration options, please take a look at our `API documentation <https://sony.github.io/model_optimization/api/api_docs/modules/mixed_precision_quantization_config.html>`_.
117
115
 
@@ -150,26 +148,14 @@ if FOUND_TF:
150
148
  fw_impl,
151
149
  fw_info)
152
150
 
153
- if new_experimental_exporter:
154
- Logger.warning('Using new experimental wrapped and ready for export models. To '
155
- 'disable it, please set new_experimental_exporter to False when '
156
- 'calling keras_post_training_quantization_experimental. '
157
- 'If you encounter an issue please file a bug.')
158
-
159
- return get_exportable_keras_model(tg)
160
-
161
- return export_model(tg,
162
- fw_info,
163
- fw_impl,
164
- tb_w,
165
- bit_widths_config)
151
+ return get_exportable_keras_model(tg)
166
152
 
167
153
 
168
154
 
169
155
  else:
170
156
  # If tensorflow is not installed,
171
157
  # we raise an exception when trying to use these functions.
172
- def keras_post_training_quantization_experimental(*args, **kwargs):
158
+ def keras_post_training_quantization(*args, **kwargs):
173
159
  Logger.critical('Installing tensorflow is mandatory '
174
- 'when using keras_post_training_quantization_experimental. '
160
+ 'when using keras_post_training_quantization. '
175
161
  'Could not find Tensorflow package.') # pragma: no cover