mct-nightly 2.2.0.20241209.545__py3-none-any.whl → 2.2.0.20241211.531__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 2.2.0.20241209.545
+ Version: 2.2.0.20241211.531
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,4 +1,4 @@
- model_compression_toolkit/__init__.py,sha256=6AyrIFAF_n31SbVFLSLbYkFV2J14B71InZGWcUcUUC0,1573
+ model_compression_toolkit/__init__.py,sha256=3AkzGEeakmlxtSQtujDenxH119YlHAatNP77RqE6mR4,1573
  model_compression_toolkit/constants.py,sha256=i_R6uXBfO1ph_X6DNJych2x59SUojfJbn7dNjs_mZnc,3846
  model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
  model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567
@@ -8,7 +8,7 @@ model_compression_toolkit/core/__init__.py,sha256=tnDtL9KmT0vsOU27SsJ19TKDEbIH-t
  model_compression_toolkit/core/analyzer.py,sha256=X-2ZpkH1xdXnISnw1yJvXnvV-ssoUh-9LkLISSWNqiY,3691
  model_compression_toolkit/core/graph_prep_runner.py,sha256=7-b7Jd5jBVaXOWg5nSqbEyzBtdaGDbCxs8aqMV6GZ6I,11287
  model_compression_toolkit/core/quantization_prep_runner.py,sha256=OtL6g2rTC5mfdKrkzm47EPPW-voGGVYMYxpy2_sfu1U,6547
- model_compression_toolkit/core/runner.py,sha256=IavCZRVG9RisEKvFDxz27WDRKrfIG03YKXKv3tcagPo,14700
+ model_compression_toolkit/core/runner.py,sha256=Hzeh3x84nPobf3Xy9dyvEqFN1yO71_JMdP_pV2eqetI,14680
  model_compression_toolkit/core/common/__init__.py,sha256=Wh127PbXcETZX_d1PQqZ71ETK3J9XO5A-HpadGUbj6o,1447
  model_compression_toolkit/core/common/base_substitutions.py,sha256=xDFSmVVs_iFSZfajytI0cuQaNRNcwHX3uqOoHgVUvxQ,1666
  model_compression_toolkit/core/common/framework_implementation.py,sha256=IkMydCj6voau7dwkYLYA_Ka_EFUKP3GKQdpYN6b1fgc,22163
@@ -29,7 +29,7 @@ model_compression_toolkit/core/common/collectors/mean_collector.py,sha256=mjr3U_
  model_compression_toolkit/core/common/collectors/min_max_per_channel_collector.py,sha256=5oKsJEKdVmj4C7fKdHhmrFN5k4G2BaFETpmf_xKNs7s,5207
  model_compression_toolkit/core/common/collectors/statistics_collector.py,sha256=vcf7Pk1v09SJC4fbAWf_8AgTktE6tPizJbQpSmocP2U,7930
  model_compression_toolkit/core/common/fusion/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/core/common/fusion/graph_fuser.py,sha256=8seu9jBpC7HartP1nJd7S_SYFICyemVpDV9ZJ0QUQ7E,6212
+ model_compression_toolkit/core/common/fusion/graph_fuser.py,sha256=5VkHB2fW0ohfPQmISz6o4fCMV8QyFdj5_kU51lN0JS8,6214
  model_compression_toolkit/core/common/fusion/layer_fusing.py,sha256=lOubqpc18TslhXZijWUJQAa1c3jIB2S-M-5HK78wJPQ,5548
  model_compression_toolkit/core/common/graph/__init__.py,sha256=Xr-Lt_qXMdrCnnOaUS_OJP_3iTTGfPCLf8_vSrQgCs0,773
  model_compression_toolkit/core/common/graph/base_graph.py,sha256=GG13PAtndsMjIqINfrCN6llVkFrg5CBfij4z99ntieU,37815
@@ -41,7 +41,7 @@ model_compression_toolkit/core/common/graph/graph_searches.py,sha256=2oKuW6L8hP-
  model_compression_toolkit/core/common/graph/virtual_activation_weights_node.py,sha256=3el-A7j1oyoo1_9zq3faQp7IeRsFXFCvnrb3zZFXpU0,9803
  model_compression_toolkit/core/common/graph/memory_graph/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py,sha256=X6FK3C3y8ixFRPjC_wm3ClloCX8_06SOdA1TRi7o_LA,3800
- model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py,sha256=yJ0GncHVOP0Wj9ntzluklDQsgRFow89gteNCvIxRVXU,2857
+ model_compression_toolkit/core/common/graph/memory_graph/compute_graph_max_cut.py,sha256=-bVPbzMMaDpbacjFOafBsxbmJFHaD4tE8IAHobLzop4,2858
  model_compression_toolkit/core/common/graph/memory_graph/cut.py,sha256=aPdXJPP5a5Rnu5Z5XqTZZkuGtdgHVu0RmX_NOfNM6Tc,2470
  model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py,sha256=crV2NCLVO8jx9MlryZBYuJKFe_G9HfM7rUR64fDymlw,17045
  model_compression_toolkit/core/common/graph/memory_graph/memory_element.py,sha256=gRmBEFRmyJsNKezQfiwDwQu1cmbGd2wgKCRTH6iw8mw,3961
@@ -59,25 +59,25 @@ model_compression_toolkit/core/common/matchers/function.py,sha256=kMwcinxn_PInve
  model_compression_toolkit/core/common/matchers/node_matcher.py,sha256=63cMwa5YbQ5LKZy8-KFmdchVc3N7mpDJ6fNDt_uAQsk,2745
  model_compression_toolkit/core/common/matchers/walk_matcher.py,sha256=xqfLKk6xZt72hSnND_HoX5ESOooNMypb5VOZkVsJ_nw,1111
  model_compression_toolkit/core/common/mixed_precision/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
- model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py,sha256=iPuI11f3IkroC-dStdR40vKn4jpa1VL-kskCXc5z7wk,7536
+ model_compression_toolkit/core/common/mixed_precision/bit_width_setter.py,sha256=lB3cxQPQqpAH5tP6kqOxqv7RmOtf1YciIkvr9irvKq0,7084
  model_compression_toolkit/core/common/mixed_precision/configurable_quant_id.py,sha256=LLDguK7afsbN742ucLpmJr5TUfTyFpK1vbf2bpVr1v0,882
  model_compression_toolkit/core/common/mixed_precision/configurable_quantizer_utils.py,sha256=7dKMi5S0zQZ16m8NWn1XIuoXsKuZUg64G4-uK8-j1PQ,5177
  model_compression_toolkit/core/common/mixed_precision/distance_weighting.py,sha256=-x8edUyudu1EAEM66AuXPtgayLpzbxoLNubfEbFM5kU,2867
- model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py,sha256=nZb0_inMxgqlx5lG6uMF0sskHR-5zMSClzxYALBeqLA,4531
+ model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py,sha256=15PbLAfuIyQInFczPka_MuyO4AJzAaOm9bOi3bzllxc,4531
  model_compression_toolkit/core/common/mixed_precision/mixed_precision_quantization_config.py,sha256=r1t025_QHshyoop-PZvL7x6UuXaeplCCU3h4VNBhJHo,4309
  model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_facade.py,sha256=B7xLl8P5eCz0_fBxocDlNiv6k-3MdfMUk2GjYKl2p5k,7522
  model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py,sha256=hlaV4ybreT0DR4ftLSPg5KTit3BEm9dWA7Y8NHpEJ8w,37532
  model_compression_toolkit/core/common/mixed_precision/sensitivity_evaluation.py,sha256=adjuvrJcan7Ua3nYlJX7T6qGkCRHGqWMaM5-099a9Us,27220
  model_compression_toolkit/core/common/mixed_precision/set_layer_to_bitwidth.py,sha256=P8QtKgFXtt5b2RoubzI5OGlCfbEfZsAirjyrkFzK26A,2846
- model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py,sha256=KifDMbm7qkSfvSl6pcZzQ82naIXzeKL6aT-VsvWZYyc,7901
+ model_compression_toolkit/core/common/mixed_precision/solution_refinement_procedure.py,sha256=cjmHFU4peJ6qYP8lsIkYYSLvRddDbiSQ6mPZnZy0p6U,7905
  model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
  model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization.py,sha256=MtPkZfPIJWI191Hbjp6JluUyLnqiJRi3zNf-CqVNuag,5053
- model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py,sha256=sFuUgWwC0aEBpf9zWmCTIcAbykBj3t5vmWAoB_BjYWA,14979
- model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py,sha256=ttc8wPa_9LZansutQ2f1ss-RTzgTv739wy3qsdLzyyk,4217
+ model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py,sha256=Wu89Rl6gAB5vL5l8jPH-4GFeKG41jusAb_yiHQ9Sjxs,14978
+ model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_aggregation_methods.py,sha256=PmuVXCKgwRNvG7pLGdA24Ren1lFH5hW51_FrOmUVHwU,4199
  model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_functions_mapping.py,sha256=mOxZwOQYnOwSJMiapEEH9o-89ujJdPxSl8zXpnApc0U,1850
  model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py,sha256=WC1EHoNuo_lrzy4NRhGJ1cgmJ2IsFsbmP86mrVO3AVA,21506
  model_compression_toolkit/core/common/mixed_precision/search_methods/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
- model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py,sha256=QZlQtvAUXUNNc6H2mKEFZhQ-fjP1QCIsxsS5BrhaXvU,16547
+ model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py,sha256=YsA2CVrGt_VGJzZ9TMqPtz5b1YX_jb-Qfb9QfV-RXsc,16568
  model_compression_toolkit/core/common/network_editors/__init__.py,sha256=vZmu55bYqiaOQs3AjfwWDXHmuKZcLHt-wm7uR5fPEqg,1307
  model_compression_toolkit/core/common/network_editors/actions.py,sha256=nid0_j-Cn10xvmztT8yCKW_6uA7JEnom9SW9syx7wc0,19594
  model_compression_toolkit/core/common/network_editors/edit_network.py,sha256=dfgawi-nB0ocAJ0xcGn9E-Zv203oUnQLuMiXpX8vTgA,1748
@@ -559,8 +559,8 @@ model_compression_toolkit/xquant/pytorch/model_analyzer.py,sha256=b93o800yVB3Z-i
  model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py,sha256=bOc-hFL3gdoSM1Th_S2N_-9JJSlPGpZCTx_QLJHS6lg,3388
  model_compression_toolkit/xquant/pytorch/similarity_functions.py,sha256=CERxq5K8rqaiE-DlwhZBTUd9x69dtYJlkHOPLB54vm8,2354
  model_compression_toolkit/xquant/pytorch/tensorboard_utils.py,sha256=mkoEktLFFHtEKzzFRn_jCnxjhJolK12TZ5AQeDHzUO8,9767
- mct_nightly-2.2.0.20241209.545.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
- mct_nightly-2.2.0.20241209.545.dist-info/METADATA,sha256=KvMf3tFdGsWWZUXlYIMAVKzUmG1cLlAe-n4d5JJqiCI,26446
- mct_nightly-2.2.0.20241209.545.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- mct_nightly-2.2.0.20241209.545.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
- mct_nightly-2.2.0.20241209.545.dist-info/RECORD,,
+ mct_nightly-2.2.0.20241211.531.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ mct_nightly-2.2.0.20241211.531.dist-info/METADATA,sha256=qdiuketUM1fffHOn2tTgXRwUHPHj-IcdJbapmxVE0bM,26446
+ mct_nightly-2.2.0.20241211.531.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ mct_nightly-2.2.0.20241211.531.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-2.2.0.20241211.531.dist-info/RECORD,,
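
Each RECORD entry above follows the standard wheel format `path,sha256=<digest>,<size>`, where the digest is the file's SHA-256 hash encoded as unpadded urlsafe base64. As a rough, self-contained illustration (not part of the package itself), an entry can be recomputed like this:

```python
import base64
import hashlib

def record_entry(path: str) -> str:
    """Rebuild a wheel RECORD line: path,sha256=<urlsafe-b64 digest, no padding>,<size>."""
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

# Example: verify the hash recorded for a file inside an extracted wheel
# (the path below assumes you have unpacked the wheel into the current directory).
print(record_entry("model_compression_toolkit/constants.py"))
```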
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
  from model_compression_toolkit import pruning
  from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model

- __version__ = "2.2.0.20241209.000545"
+ __version__ = "2.2.0.20241211.000531"
@@ -26,6 +26,8 @@ class FusedLayerType:
      """
      def __init__(self):
          self.__name__ = 'FusedLayer'
+
+
  class GraphFuser:

      def create_fused_graph(self, graph: Graph) -> Dict[str, str]:
@@ -24,6 +24,7 @@ from model_compression_toolkit.core.common.graph.memory_graph.memory_graph impor

  SchedulerInfo = namedtuple('SchedulerInfo', [OPERATORS_SCHEDULING, MAX_CUT, CUTS, FUSED_NODES_MAPPING])

+
  def compute_graph_max_cut(memory_graph: MemoryGraph,
                            n_iter: int = 50,
                            astar_n_iter: int = 500,
@@ -48,9 +48,8 @@ def set_bit_widths(mixed_precision_enable: bool,
          node_name = node.name if not node.reuse else '_'.join(node.name.split('_')[:-2])
          if node_name in sorted_nodes_names: # only configurable nodes are in this list
              node_index_in_graph = sorted_nodes_names.index(node_name)
-             _set_node_final_qc(bit_widths_config,
+             _set_node_final_qc(bit_widths_config[node_index_in_graph],
                                 node,
-                                node_index_in_graph,
                                 graph.fw_info)
          else:
              if node.is_activation_quantization_enabled():
@@ -83,8 +82,7 @@ def set_bit_widths(mixed_precision_enable: bool,


  def _get_node_qc_by_bit_widths(node: BaseNode,
-                                bit_width_cfg: List[int],
-                                node_index_in_graph: int,
+                                node_bit_width_cfg: int,
                                 fw_info) -> Any:
      """
      Get the node's quantization configuration that
@@ -93,8 +91,7 @@ def _get_node_qc_by_bit_widths(node: BaseNode,

      Args:
          node: Node to get its quantization configuration candidate.
-         bit_width_cfg: Configuration which determines the node's desired bit width.
-         node_index_in_graph: Index of the node in the bit_width_cfg.
+         node_bit_width_cfg: Configuration which determines the node's desired bit width.
          fw_info: Information relevant to a specific framework about how layers should be quantized.

      Returns:
@@ -104,24 +101,21 @@ def _get_node_qc_by_bit_widths(node: BaseNode,
      kernel_attr = fw_info.get_kernel_op_attributes(node.type)

      if node.is_activation_quantization_enabled():
-         bit_index_in_cfg = bit_width_cfg[node_index_in_graph]
-         qc = node.candidates_quantization_cfg[bit_index_in_cfg]
+         qc = node.candidates_quantization_cfg[node_bit_width_cfg]

          return qc

      elif kernel_attr is not None:
          if node.is_weights_quantization_enabled(kernel_attr[0]):
-             bit_index_in_cfg = bit_width_cfg[node_index_in_graph]
-             qc = node.candidates_quantization_cfg[bit_index_in_cfg]
+             qc = node.candidates_quantization_cfg[node_bit_width_cfg]

              return qc

      Logger.critical(f"Quantization configuration for node '{node.name}' not found in candidate configurations.") # pragma: no cover


- def _set_node_final_qc(bit_width_cfg: List[int],
+ def _set_node_final_qc(node_bit_width_cfg: int,
                         node: BaseNode,
-                        node_index_in_graph: int,
                         fw_info):
      """
      Get the node's quantization configuration that
@@ -130,15 +124,13 @@ def _set_node_final_qc(bit_width_cfg: List[int],
      If the node quantization config was not found, raise an exception.

      Args:
-         bit_width_cfg: Configuration which determines the node's desired bit width.
+         node_bit_width_cfg: Configuration which determines the node's desired bit width.
          node: Node to set its node quantization configuration.
-         node_index_in_graph: Index of the node in the bit_width_cfg.
          fw_info: Information relevant to a specific framework about how layers should be quantized.

      """
      node_qc = _get_node_qc_by_bit_widths(node,
-                                          bit_width_cfg,
-                                          node_index_in_graph,
+                                          node_bit_width_cfg,
                                           fw_info)

      if node_qc is None:
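
The bit_width_setter.py hunks above are a pure signature refactor: the caller in set_bit_widths now indexes bit_widths_config once and passes the node's own candidate index down, so _set_node_final_qc and _get_node_qc_by_bit_widths no longer need the full list plus a separate node_index_in_graph argument. A minimal, self-contained sketch of the pattern (names and data simplified, not the actual MCT classes):

```python
from typing import List

# Before: the helper received the whole per-node configuration plus an index.
def get_candidate_old(candidates: List[str], bit_width_cfg: List[int], node_index: int) -> str:
    return candidates[bit_width_cfg[node_index]]

# After: the caller resolves the index; the helper receives a single candidate index.
def get_candidate_new(candidates: List[str], node_bit_width_cfg: int) -> str:
    return candidates[node_bit_width_cfg]

candidates = ["2-bit", "4-bit", "8-bit"]   # one node's quantization candidates
bit_width_cfg = [2, 0, 1]                  # chosen candidate index per configurable node
node_index = 0

assert get_candidate_old(candidates, bit_width_cfg, node_index) == \
       get_candidate_new(candidates, bit_width_cfg[node_index]) == "8-bit"
```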
@@ -30,7 +30,7 @@ def filter_candidates_for_mixed_precision(graph: Graph,
      such that only a single candidate would remain, with the bitwidth equal to the one defined in the matching layer's
      base config in the TPC.

-     Note" This function modifies the graph inplace!
+     Note: This function modifies the graph inplace!

      Args:
          graph: A graph representation of the model to be quantized.
@@ -57,7 +57,6 @@ def compute_resource_utilization_data(in_model: Any,
      Returns:
          ResourceUtilization: An object encapsulating the calculated resource utilization computations.

-
      """
      core_config = _create_core_config_for_ru(core_config)
      # We assume that the resource_utilization_data API is used to compute the model resource utilization for
@@ -33,9 +33,10 @@ def sum_ru_values(ru_vector: np.ndarray, set_constraints: bool = True) -> List[A
      Returns: A list with an lpSum object for lp problem definition with the vector's sum.

      """
-     if not set_constraints:
-         return [0] if len(ru_vector) == 0 else [sum(ru_vector)]
-     return [lpSum(ru_vector)]
+     if set_constraints:
+         return [lpSum(ru_vector)]
+     return [0] if len(ru_vector) == 0 else [sum(ru_vector)]
+


  def max_ru_values(ru_vector: np.ndarray, set_constraints: bool = True) -> List[float]:
@@ -53,9 +54,10 @@ def max_ru_values(ru_vector: np.ndarray, set_constraints: bool = True) -> List[f
      in the linear programming problem formalization.

      """
-     if not set_constraints:
-         return [0] if len(ru_vector) == 0 else [max(ru_vector)]
-     return [ru for ru in ru_vector]
+     if set_constraints:
+         return [ru for ru in ru_vector]
+     return [0] if len(ru_vector) == 0 else [max(ru_vector)]
+


  def total_ru(ru_tensor: np.ndarray, set_constraints: bool = True) -> List[float]:
@@ -74,16 +76,14 @@ def total_ru(ru_tensor: np.ndarray, set_constraints: bool = True) -> List[float]
      in the linear programming problem formalization.

      """
-     if not set_constraints:
+     if set_constraints:
+         weights_ru = lpSum([ru[0] for ru in ru_tensor])
+         return [weights_ru + activation_ru for _, activation_ru in ru_tensor]
+     else:
          weights_ru = sum([ru[0] for ru in ru_tensor])
          activation_ru = max([ru[1] for ru in ru_tensor])
          return [weights_ru + activation_ru]

-     weights_ru = lpSum([ru[0] for ru in ru_tensor])
-     total_ru = [weights_ru + activation_ru for _, activation_ru in ru_tensor]
-
-     return total_ru
-


  class MpRuAggregation(Enum):
      """
@@ -73,7 +73,7 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,

      assert lp_problem.status == LpStatusOptimal, Logger.critical(
          "No solution was found during solving the LP problem")
-     Logger.info(LpStatus[lp_problem.status])
+     Logger.info(f"ILP status: {LpStatus[lp_problem.status]}")

      # Take the bitwidth index only if its corresponding indicator is one.
      config = np.asarray(
@@ -82,7 +82,7 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
          in layer_to_indicator_vars_mapping.values()]
      ).flatten()

-     if target_resource_utilization.bops < np.inf:
+     if target_resource_utilization.bops_restricted():
          return search_manager.config_reconstruction_helper.reconstruct_config_from_virtual_graph(config)
      else:
          return config
@@ -47,7 +47,7 @@ def greedy_solution_refinement_procedure(mp_solution: List[int],

      """
      # Refinement is not supported for BOPs utilization for now...
-     if target_resource_utilization.bops < np.inf:
+     if target_resource_utilization.bops_restricted():
          Logger.info(f'Target resource utilization constraint BOPs - Skipping MP greedy solution refinement')
          return mp_solution

@@ -151,8 +151,7 @@ def core_runner(in_model: Any,
                  f'Mixed Precision has overwrite bit-width configuration{core_config.mixed_precision_config.configuration_overwrite}')
              bit_widths_config = core_config.mixed_precision_config.configuration_overwrite

-         if (target_resource_utilization.activation_memory < np.inf or
-                 target_resource_utilization.total_memory < np.inf):
+         if target_resource_utilization.activation_restricted() or target_resource_utilization.total_mem_restricted():
              Logger.warning(
                  f"Running mixed precision for activation compression, please note this feature is experimental and is "
                  f"subject to future changes. If you encounter an issue, please open an issue in our GitHub "