mct-nightly 1.11.0.20240321.357__py3-none-any.whl → 1.11.0.20240323.408__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. {mct_nightly-1.11.0.20240321.357.dist-info → mct_nightly-1.11.0.20240323.408.dist-info}/METADATA +17 -9
  2. {mct_nightly-1.11.0.20240321.357.dist-info → mct_nightly-1.11.0.20240323.408.dist-info}/RECORD +152 -152
  3. model_compression_toolkit/__init__.py +1 -1
  4. model_compression_toolkit/constants.py +1 -1
  5. model_compression_toolkit/core/__init__.py +3 -3
  6. model_compression_toolkit/core/common/collectors/base_collector.py +2 -2
  7. model_compression_toolkit/core/common/data_loader.py +3 -3
  8. model_compression_toolkit/core/common/graph/base_graph.py +10 -13
  9. model_compression_toolkit/core/common/graph/base_node.py +3 -3
  10. model_compression_toolkit/core/common/graph/edge.py +2 -1
  11. model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py +2 -4
  12. model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py +2 -2
  13. model_compression_toolkit/core/common/hessian/hessian_info_service.py +2 -3
  14. model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py +3 -5
  15. model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py +1 -2
  16. model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py +24 -23
  17. model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py +110 -112
  18. model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py +114 -0
  19. model_compression_toolkit/core/common/mixed_precision/{kpi_tools/kpi_data.py → resource_utilization_tools/resource_utilization_data.py} +19 -19
  20. model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py +105 -0
  21. model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py +26 -0
  22. model_compression_toolkit/core/common/mixed_precision/{kpi_tools/kpi_methods.py → resource_utilization_tools/ru_methods.py} +61 -61
  23. model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py +75 -71
  24. model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py +2 -4
  25. model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py +34 -34
  26. model_compression_toolkit/core/common/model_collector.py +2 -2
  27. model_compression_toolkit/core/common/network_editors/actions.py +3 -3
  28. model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +12 -12
  29. model_compression_toolkit/core/common/pruning/importance_metrics/lfh_importance_metric.py +2 -2
  30. model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +2 -2
  31. model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +2 -2
  32. model_compression_toolkit/core/common/pruning/memory_calculator.py +7 -7
  33. model_compression_toolkit/core/common/pruning/prune_graph.py +2 -3
  34. model_compression_toolkit/core/common/pruning/pruner.py +7 -7
  35. model_compression_toolkit/core/common/pruning/pruning_config.py +1 -1
  36. model_compression_toolkit/core/common/pruning/pruning_info.py +2 -2
  37. model_compression_toolkit/core/common/quantization/candidate_node_quantization_config.py +7 -4
  38. model_compression_toolkit/core/common/quantization/node_quantization_config.py +3 -1
  39. model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +4 -2
  40. model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +4 -6
  41. model_compression_toolkit/core/common/quantization/quantization_params_generation/lut_kmeans_params.py +2 -4
  42. model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py +1 -1
  43. model_compression_toolkit/core/common/quantization/quantizers/uniform_quantizers.py +8 -6
  44. model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +2 -2
  45. model_compression_toolkit/core/common/statistics_correction/compute_bias_correction_of_graph.py +4 -6
  46. model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +4 -7
  47. model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +3 -3
  48. model_compression_toolkit/core/common/substitutions/virtual_activation_weights_composition.py +1 -1
  49. model_compression_toolkit/core/common/substitutions/weights_activation_split.py +3 -3
  50. model_compression_toolkit/core/common/user_info.py +1 -1
  51. model_compression_toolkit/core/keras/back2framework/factory_model_builder.py +3 -3
  52. model_compression_toolkit/core/keras/back2framework/instance_builder.py +2 -2
  53. model_compression_toolkit/core/keras/back2framework/mixed_precision_model_builder.py +4 -8
  54. model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py +3 -2
  55. model_compression_toolkit/core/keras/graph_substitutions/substitutions/linear_collapsing.py +2 -2
  56. model_compression_toolkit/core/keras/graph_substitutions/substitutions/matmul_substitution.py +1 -1
  57. model_compression_toolkit/core/keras/graph_substitutions/substitutions/multi_head_attention_decomposition.py +1 -1
  58. model_compression_toolkit/core/keras/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  59. model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py +3 -3
  60. model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py +1 -2
  61. model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py +5 -6
  62. model_compression_toolkit/core/keras/keras_implementation.py +1 -1
  63. model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py +1 -1
  64. model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +2 -4
  65. model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py +1 -1
  66. model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py +7 -7
  67. model_compression_toolkit/core/keras/reader/common.py +2 -2
  68. model_compression_toolkit/core/keras/reader/node_builder.py +1 -1
  69. model_compression_toolkit/core/keras/{kpi_data_facade.py → resource_utilization_data_facade.py} +25 -24
  70. model_compression_toolkit/core/keras/tf_tensor_numpy.py +4 -2
  71. model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py +3 -3
  72. model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py +6 -11
  73. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py +2 -2
  74. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py +1 -1
  75. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py +1 -1
  76. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py +5 -5
  77. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py +1 -1
  78. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +1 -1
  79. model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py +1 -1
  80. model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py +3 -7
  81. model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py +1 -2
  82. model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py +2 -2
  83. model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +1 -1
  84. model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +1 -2
  85. model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py +3 -3
  86. model_compression_toolkit/core/pytorch/pytorch_implementation.py +1 -1
  87. model_compression_toolkit/core/pytorch/reader/graph_builders.py +5 -7
  88. model_compression_toolkit/core/pytorch/reader/reader.py +2 -2
  89. model_compression_toolkit/core/pytorch/{kpi_data_facade.py → resource_utilization_data_facade.py} +24 -22
  90. model_compression_toolkit/core/pytorch/utils.py +3 -2
  91. model_compression_toolkit/core/runner.py +43 -42
  92. model_compression_toolkit/data_generation/common/data_generation.py +18 -18
  93. model_compression_toolkit/data_generation/common/model_info_exctractors.py +1 -1
  94. model_compression_toolkit/data_generation/keras/keras_data_generation.py +7 -10
  95. model_compression_toolkit/data_generation/keras/model_info_exctractors.py +2 -1
  96. model_compression_toolkit/data_generation/keras/optimization_functions/image_initilization.py +2 -1
  97. model_compression_toolkit/data_generation/keras/optimization_functions/output_loss_functions.py +2 -4
  98. model_compression_toolkit/data_generation/pytorch/model_info_exctractors.py +2 -1
  99. model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py +8 -11
  100. model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py +1 -1
  101. model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py +2 -3
  102. model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py +2 -3
  103. model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +8 -4
  104. model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +2 -2
  105. model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +7 -8
  106. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +19 -12
  107. model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +2 -2
  108. model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +10 -11
  109. model_compression_toolkit/gptq/common/gptq_graph.py +3 -3
  110. model_compression_toolkit/gptq/common/gptq_training.py +14 -12
  111. model_compression_toolkit/gptq/keras/gptq_training.py +10 -8
  112. model_compression_toolkit/gptq/keras/graph_info.py +1 -1
  113. model_compression_toolkit/gptq/keras/quantization_facade.py +15 -17
  114. model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +4 -5
  115. model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +1 -2
  116. model_compression_toolkit/gptq/pytorch/gptq_training.py +10 -8
  117. model_compression_toolkit/gptq/pytorch/graph_info.py +1 -1
  118. model_compression_toolkit/gptq/pytorch/quantization_facade.py +11 -13
  119. model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +3 -4
  120. model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +1 -2
  121. model_compression_toolkit/logger.py +1 -13
  122. model_compression_toolkit/pruning/keras/pruning_facade.py +11 -12
  123. model_compression_toolkit/pruning/pytorch/pruning_facade.py +11 -12
  124. model_compression_toolkit/ptq/keras/quantization_facade.py +13 -14
  125. model_compression_toolkit/ptq/pytorch/quantization_facade.py +7 -8
  126. model_compression_toolkit/qat/keras/quantization_facade.py +20 -22
  127. model_compression_toolkit/qat/keras/quantizer/base_keras_qat_quantizer.py +2 -3
  128. model_compression_toolkit/qat/keras/quantizer/quantization_builder.py +1 -1
  129. model_compression_toolkit/qat/pytorch/quantization_facade.py +12 -14
  130. model_compression_toolkit/qat/pytorch/quantizer/base_pytorch_qat_quantizer.py +2 -3
  131. model_compression_toolkit/qat/pytorch/quantizer/quantization_builder.py +1 -1
  132. model_compression_toolkit/target_platform_capabilities/immutable.py +4 -2
  133. model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py +4 -8
  134. model_compression_toolkit/target_platform_capabilities/target_platform/current_tp_model.py +1 -1
  135. model_compression_toolkit/target_platform_capabilities/target_platform/fusing.py +43 -8
  136. model_compression_toolkit/target_platform_capabilities/target_platform/op_quantization_config.py +13 -18
  137. model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py +2 -2
  138. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py +2 -2
  139. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py +2 -1
  140. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py +5 -5
  141. model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py +1 -2
  142. model_compression_toolkit/trainable_infrastructure/common/base_trainable_quantizer.py +13 -13
  143. model_compression_toolkit/trainable_infrastructure/common/get_quantizer_config.py +14 -7
  144. model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py +5 -5
  145. model_compression_toolkit/trainable_infrastructure/keras/base_keras_quantizer.py +2 -3
  146. model_compression_toolkit/trainable_infrastructure/keras/load_model.py +4 -5
  147. model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py +3 -4
  148. model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +3 -3
  149. model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi.py +0 -112
  150. model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_aggregation_methods.py +0 -105
  151. model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_functions_mapping.py +0 -26
  152. {mct_nightly-1.11.0.20240321.357.dist-info → mct_nightly-1.11.0.20240323.408.dist-info}/LICENSE.md +0 -0
  153. {mct_nightly-1.11.0.20240321.357.dist-info → mct_nightly-1.11.0.20240323.408.dist-info}/WHEEL +0 -0
  154. {mct_nightly-1.11.0.20240321.357.dist-info → mct_nightly-1.11.0.20240323.408.dist-info}/top_level.txt +0 -0
  155. /model_compression_toolkit/core/common/mixed_precision/{kpi_tools → resource_utilization_tools}/__init__.py +0 -0
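The central change in this release is the rename of the mixed-precision "KPI" concept to "resource utilization" (items 18-22, 69, 89, and 149-151 above). A minimal migration sketch for Keras call sites, using only the old and new facade names that appear in this diff:

import model_compression_toolkit as mct

# Before (1.11.0.20240321.357):
#   kpi_data = mct.core.keras_kpi_data(model, repr_datagen)

# After (1.11.0.20240323.408):
ru_data = mct.core.keras_resource_utilization_data(model, repr_datagen)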
model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py
@@ -68,8 +68,8 @@ class WeightsTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
         """
         # Check if the target node's layer type is supported
         if not DEFAULT_KERAS_INFO.is_kernel_op(self.hessian_request.target_node.type):
-            Logger.error(
-                f"{self.hessian_request.target_node.type} is not supported for Hessian info w.r.t weights.")
+            Logger.critical(
+                f"{self.hessian_request.target_node.type} is not supported for Hessian-based scoring with respect to weights.")
 
         # Construct the Keras float model for inference
         model, _ = FloatKerasModelBuilder(graph=self.graph).build_model()
@@ -79,7 +79,7 @@ class WeightsTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
 
         # Get the weight tensor for the target node
         if len(weight_attributes) != 1:
-            Logger.error(f"Hessian scores w.r.t weights is supported, for now, for a single-weight node. Found {len(weight_attributes)}")
+            Logger.critical(f"Hessian-based scoring with respect to weights is currently supported only for nodes with a single weight attribute. Found {len(weight_attributes)} attributes.")
 
         weight_tensor = getattr(model.get_layer(self.hessian_request.target_node.name), weight_attributes[0])
 
@@ -139,8 +139,7 @@ class WeightsTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
 
         if self.hessian_request.granularity == HessianInfoGranularity.PER_TENSOR:
             if final_approx.shape != (1,):
-                Logger.error(f"In HessianInfoGranularity.PER_TENSOR the score shape is expected"
-                             f"to be (1,) but is {final_approx.shape} ")
+                Logger.critical(f"For HessianInfoGranularity.PER_TENSOR, the expected score shape is (1,), but found {final_approx.shape}.")
         elif self.hessian_request.granularity == HessianInfoGranularity.PER_ELEMENT:
             # Reshaping the scores to the original weight shape
             final_approx = tf.reshape(final_approx, weight_tensor.shape)
@@ -195,4 +194,4 @@ class WeightsTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
         elif self.hessian_request.granularity == HessianInfoGranularity.PER_ELEMENT:
             return tf.size(weight_tensor).numpy()
         else:
-            Logger.error(f"Encountered an unexpected granularity {self.hessian_request.granularity} ")
+            Logger.critical(f"Unexpected granularity encountered: {self.hessian_request.granularity}.")
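A pattern that recurs throughout the hunks in this release: Logger.error calls, and in several files bare raise Exception statements, are replaced with Logger.critical. Folding plain raises into it implies that Logger.critical both logs the message and aborts. A minimal sketch of such a log-and-raise helper, for illustration only (MCT's actual Logger in logger.py may differ):

import logging

class Logger:
    _logger = logging.getLogger("illustration")

    @classmethod
    def critical(cls, msg: str):
        # Log the failure and then abort, so call sites need no separate raise.
        cls._logger.critical(msg)
        raise Exception(msg)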
model_compression_toolkit/core/keras/keras_implementation.py
@@ -493,7 +493,7 @@ class KerasImplementation(FrameworkImplementation):
                 fw_impl=self,
                 num_iterations_for_approximation=num_iterations_for_approximation)
         else:
-            Logger.error(f"Keras does not support hessian mode of {trace_hessian_request.mode}")
+            Logger.critical(f"Unsupported Hessian mode for Keras: {trace_hessian_request.mode}.")
 
     def is_output_node_compatible_for_hessian_score_computation(self,
                                                                 node: BaseNode) -> Any:
model_compression_toolkit/core/keras/mixed_precision/configurable_activation_quantizer.py
@@ -66,7 +66,7 @@ class ConfigurableActivationQuantizer(BaseKerasInferableQuantizer):
         for qc in node_q_cfg:
             if qc.activation_quantization_cfg.enable_activation_quantization != \
                     node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization:
-                Logger.error("Candidates with different activation enabled properties is currently not supported.")  # pragma: no cover
+                Logger.critical("Unsupported configuration: Mixing candidates with differing activation quantization states (enabled/disabled).")  # pragma: no cover
 
         self.activation_quantizers = init_activation_quantizers(self.node_q_cfg)
         self.active_quantization_config_index = max_candidate_idx  # initialize with first config as default
model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py
@@ -77,8 +77,7 @@ class ConfigurableWeightsQuantizer(BaseKerasInferableQuantizer):
         for qc in self.node_q_cfg:
             if qc.weights_quantization_cfg.get_attr_config(self.kernel_attr).enable_weights_quantization != \
                     self.node_q_cfg[0].weights_quantization_cfg.get_attr_config(self.kernel_attr).enable_weights_quantization:
-                Logger.error("Candidates with different kernel attribute quantization enabled "
-                             "properties is currently not supported.")
+                Logger.critical("Mixing candidates with varying weights quantization states (enabled/disabled) is not supported.")
 
         # Initialize quantized weights for each weight that should be quantized.
         self.quantized_weights = init_quantized_weights(node_q_cfg=self.node_q_cfg,
@@ -101,8 +100,7 @@ class ConfigurableWeightsQuantizer(BaseKerasInferableQuantizer):
         """
 
         if index >= len(self.node_q_cfg):
-            Logger.error(f'Quantizer has {len(self.node_q_cfg)} '  # pragma: no cover
-                         f'possible nbits. Can not set index {index}')
+            Logger.critical(f'Quantizer supports only {len(self.node_q_cfg)} bit width configurations; index {index} is out of range.')  # pragma: no cover
         self.active_quantization_config_index = index
 
     def __call__(self,
model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py
@@ -172,7 +172,7 @@ class PruningKerasImplementation(KerasImplementation, PruningFrameworkImplementation):
         if fw_info.is_kernel_op(node.type):
             kernel_attributes = fw_info.get_kernel_op_attributes(node.type)
             if kernel_attributes is None or len(kernel_attributes) == 0:
-                Logger.error(f"Expected to find attributes but found {kernel_attributes}")
+                Logger.critical(f"Expected kernel attributes for node type {node.type}, found None or empty.")
 
             for attr in kernel_attributes:
                 attributes_with_axis[attr] = fw_info.kernel_channels_mapping.get(node.type)
model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py
@@ -69,11 +69,11 @@ def power_of_two_quantization(activation_n_bits: int,
     activation_is_signed = quantization_params.get(SIGNED)
 
     if activation_threshold is None:
-        Logger.error("Activation threshold is None")  # pragma: no cover
+        Logger.critical("Activation threshold must be specified.")  # pragma: no cover
     if activation_is_signed is None:
-        Logger.error("activation_is_signed is None")  # pragma: no cover
+        Logger.critical("Parameter 'activation_is_signed' must be specified.")  # pragma: no cover
     if not threshold_is_power_of_two(activation_threshold, per_channel=False):
-        Logger.error("Activation threshold is not power of two")  # pragma: no cover
+        Logger.critical("Activation threshold must be a power of two.")  # pragma: no cover
 
     min_value, max_value = quantizer_min_max_calculator(activation_threshold,
                                                         activation_n_bits,
@@ -99,9 +99,9 @@ def symmetric_quantization(activation_n_bits: int,
     activation_is_signed = quantization_params.get(SIGNED)
 
     if activation_threshold is None:
-        Logger.error("Activation threshold is None")  # pragma: no cover
+        Logger.critical("Activation threshold must be specified.")  # pragma: no cover
     if activation_is_signed is None:
-        Logger.error("activation_is_signed is None")  # pragma: no cover
+        Logger.critical("Parameter 'activation_is_signed' must be specified.")  # pragma: no cover
 
     min_value, max_value = quantizer_min_max_calculator(activation_threshold,
                                                         activation_n_bits,
@@ -126,9 +126,9 @@ def uniform_quantization(activation_n_bits: int,
     min_value, max_value = quantization_params.get(RANGE_MIN), quantization_params.get(RANGE_MAX)
 
     if min_value is None:
-        Logger.error("Min value is None")  # pragma: no cover
+        Logger.critical("Minimum value must be specified.")  # pragma: no cover
     if max_value is None:
-        Logger.error("Max value is None")  # pragma: no cover
+        Logger.critical("Maximum value must be specified.")  # pragma: no cover
 
     return lambda x: q(x, min_value, max_value, activation_n_bits)
 
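For context, the returned lambda above delegates to a quantization function q(x, min_value, max_value, activation_n_bits). A sketch of the standard uniform fake-quantization such a function computes; the name uniform_fake_quant and the rounding details are illustrative, and MCT's q may differ:

import numpy as np

def uniform_fake_quant(x: np.ndarray, range_min: float, range_max: float, n_bits: int) -> np.ndarray:
    scale = (range_max - range_min) / (2 ** n_bits - 1)  # step size of the integer grid
    levels = np.round((x - range_min) / scale)           # project onto the grid
    levels = np.clip(levels, 0, 2 ** n_bits - 1)         # saturate to representable levels
    return levels * scale + range_min                    # map back to the float domain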
model_compression_toolkit/core/keras/reader/common.py
@@ -47,7 +47,7 @@ def is_node_an_input_layer(node: BaseNode) -> bool:
     elif isinstance(node, KerasNode):
         return isinstance(node.layer, InputLayer)
     else:
-        Logger.error('Node to check has to be either a graph node or a keras node')  # pragma: no cover
+        Logger.critical('Node must be a graph node or a Keras node for input layer check.')  # pragma: no cover
 
 
 def is_node_a_model(node: BaseNode) -> bool:
@@ -64,5 +64,5 @@ def is_node_a_model(node: BaseNode) -> bool:
     elif isinstance(node, KerasNode):
         return isinstance(node.layer, Functional) or isinstance(node.layer, Sequential)
     else:
-        Logger.error('Node to check has to be either a graph node or a keras node')  # pragma: no cover
+        Logger.critical('Node must be a graph node or a Keras node.')  # pragma: no cover
 
model_compression_toolkit/core/keras/reader/node_builder.py
@@ -132,7 +132,7 @@ def build_node(node: KerasNode,
     # All KerasTensor and positional weights are removed from the call_args\kwargs. They are restored
     # in the model builder.
     if len(weights) > 0:
-        Logger.error('Functional nodes are not expected to have weights in framework')
+        Logger.critical('Functional nodes are not expected to have weights in this framework.')
 
     # read weights from call args
     tf_function_symbols = get_tf_function_symbols()
model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -15,12 +15,11 @@
 
 from typing import Callable
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig, CoreConfig
-from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
+from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
-from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi_data import compute_kpi_data
-from model_compression_toolkit.core.common.framework_info import FrameworkInfo
+from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.constants import FOUND_TF
 
 if FOUND_TF:
@@ -33,13 +32,16 @@ if FOUND_TF:
 
     KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
-    def keras_kpi_data(in_model: Model,
-                       representative_data_gen: Callable,
-                       core_config: CoreConfig = CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfig()),
-                       target_platform_capabilities: TargetPlatformCapabilities = KERAS_DEFAULT_TPC) -> KPI:
+    def keras_resource_utilization_data(in_model: Model,
+                                        representative_data_gen: Callable,
+                                        core_config: CoreConfig = CoreConfig(
+                                            mixed_precision_config=MixedPrecisionQuantizationConfig()),
+                                        target_platform_capabilities: TargetPlatformCapabilities = KERAS_DEFAULT_TPC) -> ResourceUtilization:
         """
-        Computes KPI data that can be used to calculate the desired target KPI for mixed-precision quantization.
-        Builds the computation graph from the given model and hw modeling, and uses it to compute the KPI data.
+        Computes resource utilization data that can be used to calculate the desired target resource utilization
+        for mixed-precision quantization.
+        Builds the computation graph from the given model and hw modeling, and uses it to compute the
+        resource utilization data.
 
         Args:
             in_model (Model): Keras model to quantize.
@@ -49,7 +51,7 @@ if FOUND_TF:
 
         Returns:
 
-            A KPI object with total weights parameters sum and max activation tensor.
+            A ResourceUtilization object with total weights parameters sum and max activation tensor.
 
         Examples:
 
@@ -63,30 +65,29 @@ if FOUND_TF:
         >>> import numpy as np
         >>> def repr_datagen(): yield [np.random.random((1, 224, 224, 3))]
 
-        Import MCT and call for KPI data calculation:
+        Import MCT and call for resource utilization data calculation:
 
         >>> import model_compression_toolkit as mct
-        >>> kpi_data = mct.core.keras_kpi_data(model, repr_datagen)
+        >>> ru_data = mct.core.keras_resource_utilization_data(model, repr_datagen)
 
         """
 
         if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
-            Logger.error("KPI data computation can't be executed without MixedPrecisionQuantizationConfig object."
-                         "Given quant_config is not of type MixedPrecisionQuantizationConfig.")
+            Logger.critical("Resource utilization data computation requires a MixedPrecisionQuantizationConfig object; "
+                            "provided config is of an incorrect type.")
 
         fw_impl = KerasImplementation()
 
-        return compute_kpi_data(in_model,
-                                representative_data_gen,
-                                core_config,
-                                target_platform_capabilities,
-                                DEFAULT_KERAS_INFO,
-                                fw_impl)
+        return compute_resource_utilization_data(in_model,
+                                                 representative_data_gen,
+                                                 core_config,
+                                                 target_platform_capabilities,
+                                                 DEFAULT_KERAS_INFO,
+                                                 fw_impl)
 
 else:
     # If tensorflow is not installed,
     # we raise an exception when trying to use this function.
-    def keras_kpi_data(*args, **kwargs):
-        Logger.critical('Installing tensorflow is mandatory '
-                        'when using keras_kpi_data. '
-                        'Could not find Tensorflow package.')  # pragma: no cover
+    def keras_resource_utilization_data(*args, **kwargs):
+        Logger.critical("Tensorflow must be installed to use keras_resource_utilization_data. "
+                        "The 'tensorflow' package is missing.")  # pragma: no cover
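A hedged usage sketch: the returned object can seed a mixed-precision target. The attribute and constructor names below (weights_memory) are assumed from the former KPI class that this release renames, and may not match the new ResourceUtilization signature exactly:

import model_compression_toolkit as mct

ru_data = mct.core.keras_resource_utilization_data(model, repr_datagen)
# For example, constrain weights memory to 75% of the measured utilization:
target_ru = mct.core.ResourceUtilization(weights_memory=ru_data.weights_memory * 0.75)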
model_compression_toolkit/core/keras/tf_tensor_numpy.py
@@ -16,6 +16,8 @@ from typing import Union, List, Tuple
 import tensorflow as tf
 import numpy as np
 
+from model_compression_toolkit.logger import Logger
+
 
 def to_tf_tensor(tensor):
     """
@@ -35,7 +37,7 @@ def to_tf_tensor(tensor):
     elif isinstance(tensor, np.ndarray):
         return tf.convert_to_tensor(tensor.astype(np.float32))
     else:
-        raise Exception(f'Conversion of type {type(tensor)} to {type(tf.Tensor)} is not supported')
+        Logger.critical(f'Unsupported type for conversion to TF tensor: {type(tensor)}.')
 
 
 def tf_tensor_to_numpy(tensor: Union[List, Tuple, np.ndarray, tf.Tensor],
@@ -65,4 +67,4 @@ def tf_tensor_to_numpy(tensor: Union[List, Tuple, np.ndarray, tf.Tensor],
     elif isinstance(tensor, tf.Tensor):
         return tensor.numpy()
     else:
-        raise Exception(f'Conversion of type {type(tensor)} to {type(np.ndarray)} is not supported')
+        Logger.critical(f'Unsupported type for conversion to Numpy array: {type(tensor)}.')
model_compression_toolkit/core/pytorch/back2framework/factory_model_builder.py
@@ -38,9 +38,9 @@ def get_pytorch_model_builder(mode: ModelBuilderMode) -> type:
     """
 
     if not isinstance(mode, ModelBuilderMode):
-        Logger.error(f'get_pytorch_model_builder expects a mode of type ModelBuilderMode, but {type(mode)} was passed.')
+        Logger.critical(f"Expected a ModelBuilderMode type for 'mode' parameter; received {type(mode)} instead.")
     if mode is None:
-        Logger.error(f'get_pytorch_model_builder received a mode which is None')
+        Logger.critical(f"Received 'mode' parameter is None.")
     if mode not in pytorch_model_builders.keys():
-        Logger.error(f'mode {mode} is not in pytorch model builders factory')
+        Logger.critical(f"'mode' parameter {mode} is not supported by the PyTorch model builders factory.")
     return pytorch_model_builders.get(mode)
model_compression_toolkit/core/pytorch/back2framework/mixed_precision_model_builder.py
@@ -101,8 +101,7 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
         # or single precision).
         node_weights_qc = n.get_unique_weights_candidates(kernel_attr)
         if not len(node_weights_qc) == 1:
-            Logger.error(f"Expecting node {n.name} to have a unique weights configuration "  # pragma: no cover
-                         f"but {len(node_weights_qc)} different configurations exist.")
+            Logger.critical(f"Expected a single weights quantization configuration for node '{n.name}', but found ({len(node_weights_qc)}) configurations.")  # pragma: no cover
 
         quantier_for_node = get_inferable_quantizer_class(QuantizationTarget.Weights,
                                                           node_weights_qc[0].weights_quantization_cfg
@@ -139,8 +138,7 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
 
         max_cfg_candidates = n.find_max_candidates_indices()
         if not len(max_cfg_candidates) == 1:
-            Logger.error(f"A maximal config candidate must be defined, "  # pragma: no cover
-                         f"but some node have multiple potential maximal candidates")
+            Logger.critical(f"A maximal configuration candidate must be uniquely defined; however, multiple potential maximal candidates were found.")  # pragma: no cover
 
         max_candidate_idx = max_cfg_candidates[0]
 
@@ -179,7 +177,7 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
 
         max_cfg_candidates = n.find_max_candidates_indices()
         assert len(max_cfg_candidates) == 1, \
-            f"A maximal config candidate must be defined, but some node have multiple potential maximal candidates"
+            f"A maximal configuration candidate must be uniquely defined; however, multiple potential maximal candidates were found."
         max_candidate_idx = max_cfg_candidates[0]
 
         kernel_attr = self.fw_info.get_kernel_op_attributes(n.type)[0]
@@ -189,8 +187,7 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
                              * num_of_outputs
         else:
             node_act_qc = n.get_unique_activation_candidates()
-            assert len(node_act_qc) == 1, f"Expecting node {n.name} to have a unique activation configuration, " \
-                                          f"but {len(node_act_qc)} different configurations exist."
+            assert len(node_act_qc) == 1, f"Expected a single activation configuration for node '{n.name}', but found multiple ({len(node_act_qc)}) configurations."
             quantizer_for_node = get_inferable_quantizer_class(QuantizationTarget.Activation,
                                                                node_act_qc[0].activation_quantization_cfg.activation_quantization_method,
                                                                BasePyTorchInferableQuantizer)
@@ -204,8 +201,7 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
         if len(activation_quantizers) == 1:
             return PytorchActivationQuantizationHolder(activation_quantizers[0])
 
-        Logger.error(f'PytorchActivationQuantizationHolder supports a single quantizer but '  # pragma: no cover
-                     f'{len(activation_quantizers)} quantizers were found for node {n}')
+        Logger.critical(f"PytorchActivationQuantizationHolder expects a single quantizer, but ({len(activation_quantizers)}) quantizers were found for node {n}.")  # pragma: no cover
 
     def build_model(self) -> Tuple[torch.nn.Module, UserInformation,
                                    Dict[str, Union[PytorchQuantizationWrapper, PytorchActivationQuantizationHolder]]]:
@@ -283,5 +279,4 @@ class MixedPrecisionPyTorchModelBuilder(PyTorchModelBuilder):
         elif weights_quant and act_quant:
             return self._get_weights_quant_layers(n, named_layers) + self._get_activation_quant_layers(n, named_layers)
         else:  # pragma: no cover
-            Logger.error(f"Expects node {n.name} to have at either weights or activation quantization configured,"
-                         f"but both are disabled.")
+            Logger.critical(f"Expected node {n.name} to have either weights or activation quantization enabled, but both are disabled.")
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/const_holder_conv.py
@@ -53,13 +53,13 @@ class FunctionalConvSubstitution(common.BaseSubstitution):
         elif func_node.type == conv_transpose2d:
             new_layer = ConvTranspose2d
         else:
-            Logger.error('mismatch in substitution filter')  # pragma: no cover
+            Logger.critical(f'Substitution filter mismatch. Layer {func_node.type}. Must be {type(Conv2d)} or {type(ConvTranspose2d)}.')  # pragma: no cover
 
         out_channel_index, in_channel_index = self.fw_info.kernel_channels_mapping.get(new_layer)
 
         # Create new node of layer convolution
         if 1 not in func_node.weights:
-            Logger.error('missing weight input')  # pragma: no cover
+            Logger.critical(f'Weight input missing for node {func_node.name}.')  # pragma: no cover
         weight = func_node.weights[1]
         bias = func_node.weights.get(2)
         framework_attr = func_node.framework_attr
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py
@@ -47,7 +47,7 @@ class FunctionalBatchNorm(common.BaseSubstitution):
             Weights dictionary for BatchNorm2d.
         """
         if 1 not in node.weights and 2 not in node.weights:
-            Logger.error(f'Missing {MOVING_MEAN} and {MOVING_VARIANCE} in functional batch_norm inputs')
+            Logger.critical(f'Missing {MOVING_MEAN} and {MOVING_VARIANCE} in functional batch_norm inputs.')
         weights_dict = {MOVING_MEAN: node.weights[1],
                         MOVING_VARIANCE: node.weights[2],
                         GAMMA: np.ones(node.weights[1].shape),
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py
@@ -101,7 +101,7 @@ def conv2d_collapsing_fn(first_node: BaseNode,
 
         return kernel_collapsed, bias_collapsed
     else:
-        Logger.error("No supported layer collapsing of {} and {}".format(first_node.type, second_node.type))
+        Logger.critical(f"Layer collapsing is not supported for the combination of {first_node.type} and {second_node.type}.")
 
 
 def pytorch_linear_collapsing() -> Conv2DCollapsing:
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
@@ -47,19 +47,19 @@ class MHAParams:
         # Only batch first network is supported
         if BATCH_FIRST in mha_node.framework_attr.keys():
             if mha_node.framework_attr[BATCH_FIRST] is not True:
-                Logger.error('Only batch first network is supported')  # pragma: no cover
+                Logger.critical('Only networks with batch first cofiguration are supported.')  # pragma: no cover
         else:
-            Logger.error('Only batch first network is supported')  # pragma: no cover
+            Logger.critical('Only networks with batch first cofiguration are supported.')  # pragma: no cover
 
         # Add Zero Attn feature is Not Implemented
         if ADD_ZERO_ATTN in mha_node.framework_attr.keys():
             if mha_node.framework_attr[ADD_ZERO_ATTN] is not False:
-                Logger.error('Add Zero Attn feature is Not Implemented')  # pragma: no cover
+                Logger.critical('Add Zero Attention (Add Zero Attn) feature is not implemented.')  # pragma: no cover
 
         # Check if Add Bias KV feature is Active
         if BIAS_K and BIAS_V in mha_node.weights.keys():
             if mha_node.weights[BIAS_K] is not None and mha_node.weights[BIAS_V] is not None:
-                Logger.error('Add BIAS_KV feature is Not Implemented')  # pragma: no cover
+                Logger.critical('Add Bias to Key/Value (BIAS_KV) feature is not implemented.')  # pragma: no cover
 
         self.embed_dim = mha_node.framework_attr[EMBED_DIM]
         self.num_heads = mha_node.framework_attr[NUM_HEADS]
@@ -702,7 +702,7 @@ class MultiHeadAttentionDecomposition(common.BaseSubstitution):
         """
 
         if mha_node.reuse:
-            raise Exception("MCT doesn't support reuse of MultiHeadAttention layer")  # pragma: no cover
+            Logger.critical("Reuse of MultiHeadAttention layers is currently not supported.")  # pragma: no cover
         params = MHAParams(mha_node)
 
         # project
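Taken together, the checks in MHAParams constrain how the original torch.nn.MultiheadAttention must have been constructed for the decomposition to apply. A sketch using standard torch.nn.MultiheadAttention arguments (the layer sizes are arbitrary):

import torch.nn as nn

# A decomposition-compatible configuration per the checks above:
mha = nn.MultiheadAttention(embed_dim=64, num_heads=4,
                            batch_first=True,     # required: only batch-first networks are supported
                            add_zero_attn=False,  # the add-zero-attn feature is not implemented
                            add_bias_kv=False)    # the bias_k/bias_v feature is not implemented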
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/relu_bound_to_power_of_2.py
@@ -103,7 +103,7 @@ class ReLUBoundToPowerOfTwo(common.BaseSubstitution):
             else:
                 return graph
         else:
-            Logger.error(f"In substitution with wrong matched pattern")
+            Logger.critical(f"Encountered an unexpected non-linearity type not supported for this substitution: {non_linear_node.type}.")
         Logger.debug(
             f"Node named:{non_linear_node.name} changed "
             f"to:{non_linear_node.type}")
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py
@@ -58,7 +58,7 @@ class ReshapeWithStaticShapes(common.BaseSubstitution):
         if len(node.output_shape) == 1:
             node.output_shape[0][0] = BATCH_DIM_VALUE
         else:
-            Logger.error('Reshape or view nodes should have a single output shape')  # pragma: no cover
+            Logger.critical("This substitution handles 'reshape' or 'view' nodes with a single output shape.")  # pragma: no cover
 
         # configure the new static output shape attribute
         node.op_call_args = node.output_shape
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/residual_collapsing.py
@@ -58,7 +58,7 @@ def residual_collapsing_fn(first_node: BaseNode,
                     kernel[i, i, idxH, idxW] += 1
         return kernel
     else:
-        Logger.error("No supported add residual collapsing for {}".format(first_node.type))
+        Logger.critical(f"Residual collapsing not supported for node type: {first_node.type}")
 
 
 def pytorch_residual_collapsing() -> ResidualCollapsing:
model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py
@@ -65,10 +65,7 @@ class ActivationTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
         model_output_nodes = [ot.node for ot in self.graph.get_outputs()]
 
         if self.hessian_request.target_node in model_output_nodes:
-            Logger.exception("Trying to compute activation Hessian approximation with respect to the model output. "
-                             "This operation is not supported. "
-                             "Remove the output node from the set of node targets in the Hessian request.")
-
+            Logger.critical("Activation Hessian approximation cannot be computed for model outputs. Exclude output nodes from Hessian request targets.")
         grad_model_outputs = [self.hessian_request.target_node] + model_output_nodes
         model, _ = FloatPyTorchModelBuilder(graph=self.graph, append2output=grad_model_outputs).build_model()
         model.eval()
@@ -82,8 +79,7 @@ class ActivationTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
             outputs = model(*self.input_images)
 
             if len(outputs) != len(grad_model_outputs):
-                Logger.error(f"Model for computing activation Hessian approximation expects {len(grad_model_outputs)} "
-                             f"outputs, but got {len(outputs)} output tensors.")
+                Logger.critical(f"Mismatch in expected and actual model outputs for activation Hessian approximation. Expected {len(grad_model_outputs)} outputs, received {len(outputs)}.")
 
             # Extracting the intermediate activation tensors and the model real output
             # TODO: we are assuming that the hessian request is for a single node.
@@ -146,5 +142,5 @@ class ActivationTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
                 return ipts_hessian_trace_approx.tolist()
 
         else:
-            Logger.error(f"{self.hessian_request.granularity} is not supported for Pytorch activation hessian's trace approx calculator")
+            Logger.critical(f"PyTorch activation Hessian's trace approximation does not support {self.hessian_request.granularity} granularity.")
 
model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py
@@ -65,7 +65,6 @@ class TraceHessianCalculatorPytorch(TraceHessianCalculator):
         concat_axis_dim = [o.shape[0] for o in _r_tensors]
         if not all(d == concat_axis_dim[0] for d in concat_axis_dim):
             Logger.critical(
-                "Can't concat model's outputs for gradients calculation since the shape of the first axis "  # pragma: no cover
-                "is not equal in all outputs.")
+                "Unable to concatenate tensors for gradient calculation due to mismatched shapes along the first axis.")
 
         return torch.concat(_r_tensors, dim=1)
model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py
@@ -70,7 +70,7 @@ class WeightsTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
 
         # Check if the target node's layer type is supported
         if not DEFAULT_PYTORCH_INFO.is_kernel_op(self.hessian_request.target_node.type):
-            Logger.error(f"{self.hessian_request.target_node.type} is not supported for Hessian info w.r.t weights.")  # pragma: no cover
+            Logger.critical(f"Hessian information with respect to weights is not supported for {self.hessian_request.target_node.type} layers.")  # pragma: no cover
 
         # Float model
         model, _ = FloatPyTorchModelBuilder(graph=self.graph).build_model()
@@ -80,7 +80,7 @@ class WeightsTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
 
         # Get the weight tensor for the target node
         if len(weights_attributes) != 1:
-            Logger.error(f"Hessian scores w.r.t weights is supported, for now, for a single-weight node. Found {len(weights_attributes)}")
+            Logger.critical(f"Currently, Hessian scores with respect to weights are supported only for nodes with a single weight attribute. {len(weights_attributes)} attributes found.")
 
         weights_tensor = getattr(getattr(model,self.hessian_request.target_node.name),weights_attributes[0])
 
model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py
@@ -65,7 +65,7 @@ class ConfigurableActivationQuantizer(BasePyTorchInferableQuantizer):
         for qc in self.node_q_cfg:
             if qc.activation_quantization_cfg.enable_activation_quantization != \
                     self.node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization:
-                Logger.error("Candidates with different activation enabled properties is currently not supported.")  # pragma: no cover
+                Logger.critical("Unsupported configuration: Mixing candidates with differing activation quantization states (enabled/disabled).")  # pragma: no cover
 
         # Setting layer's activation
         self.activation_quantizers = init_activation_quantizers(self.node_q_cfg)
model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py
@@ -77,8 +77,7 @@ class ConfigurableWeightsQuantizer(BasePyTorchInferableQuantizer):
         for qc in self.node_q_cfg:
             if qc.weights_quantization_cfg.get_attr_config(self.kernel_attr).enable_weights_quantization != \
                     self.node_q_cfg[0].weights_quantization_cfg.get_attr_config(self.kernel_attr).enable_weights_quantization:
-                Logger.error("Candidates with different kernel attribute quantization enabled "
-                             "properties is currently not supported.")
+                Logger.critical("Unsupported configuration: Mixing candidates with differing weights quantization states (enabled/disabled).")  # pragma: no cover
 
         # Initialize quantized weights for each weight that should be quantized.
         self.quantized_weights = init_quantized_weights(node_q_cfg=self.node_q_cfg,
model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py
@@ -184,7 +184,7 @@ class PruningPytorchImplementation(PytorchImplementation, PruningFrameworkImplementation):
         if fw_info.is_kernel_op(node.type):
             kernel_attributes = fw_info.get_kernel_op_attributes(node.type)
             if kernel_attributes is None or len(kernel_attributes) == 0:
-                Logger.error(f"Expected to find attributes but found {kernel_attributes}")
+                Logger.critical(f"Expected to find kernel attributes but none were identified for node '{node.name}' of type {node.type}.")
 
             for attr in kernel_attributes:
                 attributes_with_axis[attr] = fw_info.kernel_channels_mapping.get(node.type)
@@ -273,7 +273,7 @@ def _prune_pytorch_edge_node(node: BaseNode,
         elif node.type == torch.nn.Linear:
             node.framework_attr[OUT_FEATURES] = int(np.sum(mask))
         else:
-            Logger.exception(f"{node.type} is currently not supported"
+            Logger.critical(f"{node.type} is currently not supported"
                              f"as an edge node in a pruning section")
 
     if is_exit_node:
@@ -282,7 +282,7 @@ def _prune_pytorch_edge_node(node: BaseNode,
         elif node.type == torch.nn.Linear:
             node.framework_attr[IN_FEATURES] = int(np.sum(mask))
         else:
-            Logger.exception(f"{node.type} is currently not supported"
+            Logger.critical(f"{node.type} is currently not supported"
                              f"as an edge node in a pruning section")
         # Adjust the input shape for the last node in the section.
         _edit_node_input_shape(node, mask_bool, fw_info)
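For clarity, the shape arithmetic in the hunks above: a 0/1 channel mask keeps int(np.sum(mask)) channels, applied to the output side of an entry node and the input side of an exit node. A toy illustration (variable names are hypothetical):

import numpy as np

mask = np.array([1, 0, 1, 1])   # keep channels 0, 2 and 3
remaining = int(np.sum(mask))   # -> 3
# entry node: linear.out_features = remaining  (OUT_FEATURES above)
# exit node:  linear.in_features  = remaining  (IN_FEATURES above)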
model_compression_toolkit/core/pytorch/pytorch_implementation.py
@@ -301,7 +301,7 @@ class PytorchImplementation(FrameworkImplementation):
         if quant_config.softmax_shift:
             substitutions_list.append(pytorch_softmax_shift())
         if quant_config.input_scaling:
-            raise Exception('Input scaling is currently not supported for Pytorch.')
+            Logger.critical('Input scaling is currently not supported for Pytorch.')
         return substitutions_list
 
     def get_substitutions_pre_build(self) -> List[common.BaseSubstitution]:
model_compression_toolkit/core/pytorch/reader/graph_builders.py
@@ -46,9 +46,7 @@ def extract_holder_weights(constant_name, node_target, model, weights, to_numpy):
     named_buffer_weights = {constant_name: to_numpy(parameter) for name, parameter in
                             model.named_buffers() if node_target == name}
     if len(named_parameters_weights) + len(named_buffer_weights) > 1:
-        raise Exception(
-            f'Constant parameter can only have one tensor. Here we have '
-            f'{len(named_parameters_weights)+ len(named_buffer_weights)}')
+        Logger.critical("A single constant parameter must correspond to exactly one tensor. Found {len(named_parameters_weights) + len(named_buffer_weights)} parameters.")
 
     weights.update(named_parameters_weights)
     weights.update(named_buffer_weights)
@@ -108,13 +106,13 @@ def nodes_builder(model: GraphModule,
             elif hasattr(torch.Tensor, node.target):
                 node_type = getattr(torch.Tensor, node.target)
             else:
-                raise Exception(f'Call method of type \'{node.target}\' is currently not supported.')
+                Logger.critical(f"The call method '{node.target}' is not supported.")
         elif node.op == GET_ATTR:
             Logger.warning(
                 'Pytorch model has a parameter or constant Tensor value. This can cause unexpected behaviour when '
                 'converting the model.')
         else:
-            raise Exception(f'Unknown node type: {node.name}')
+            Logger.critical(f'Encountered an unsupported node type in node: {node.name}.')
 
         # extract layer weights and named buffers
         weights = {}
@@ -129,7 +127,7 @@ def nodes_builder(model: GraphModule,
         if node.op == GET_ATTR:
             new_const = extract_holder_weights(node, node.target, model, weights, to_numpy)
             if list(new_const.keys())[0] in consts_dict:
-                Logger.error('Constant weight recorded twice')
+                Logger.critical('A constant weight appears to have been recorded multiple times.')
             consts_dict.update(new_const)
             continue
@@ -219,7 +217,7 @@ def nodes_builder(model: GraphModule,
     # make sure all extracted constants were used in the graph
     not_connected_consts = [c for c in consts_dict if c not in used_consts]
     if not_connected_consts:
-        Logger.error(f'error reading graph - constants not connected in graph: {not_connected_consts}')
+        Logger.critical(f'Error reading graph: These constants are not connected in the graph: {not_connected_consts}.')
 
     # generate graph outputs list
     for node in output_nodes: