mct-nightly 1.10.0.20231127.post419__py3-none-any.whl → 1.10.0.20231129.post414__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 1.10.0.20231127.post419
+ Version: 1.10.0.20231129.post414
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -133,10 +133,10 @@ model_compression_toolkit/core/common/visualization/final_config_visualizer.py,s
  model_compression_toolkit/core/common/visualization/nn_visualizer.py,sha256=6EjZj_KE1tICTQ0XSKIx5ivsRFpRktFywda7pW7YnNQ,5955
  model_compression_toolkit/core/common/visualization/tensorboard_writer.py,sha256=1Vbr5gKqH1fJl91iTNFlIjyEMh6jm88T4AIWalMrJFw,20099
  model_compression_toolkit/core/keras/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
- model_compression_toolkit/core/keras/constants.py,sha256=0vQZ3-8-IJ735KCet858OcxlDRFx3GyDks97GBF9gS4,2968
+ model_compression_toolkit/core/keras/constants.py,sha256=oFYFagoFTOQTrs2RHVc93583EhOvcvbCYHleqsZdQ6s,3046
  model_compression_toolkit/core/keras/custom_layer_validation.py,sha256=f-b14wuiIgitBe7d0MmofYhDCTO3IhwJgwrh-Hq_t_U,1192
  model_compression_toolkit/core/keras/default_framework_info.py,sha256=cMdt9KvJMqOmWjFtUiEejzOe77mCpnnd3GzERgNh8Zk,4970
- model_compression_toolkit/core/keras/keras_implementation.py,sha256=JBJWhpsK9jOSaIrDq1tK1Xm4fJz3NrZ2lB5nRzz3mUY,28304
+ model_compression_toolkit/core/keras/keras_implementation.py,sha256=HhyK68qXyHGEsT5DkfmHTibinRxyTh0DquiXGrBAlK4,28407
  model_compression_toolkit/core/keras/keras_model_validation.py,sha256=1wNV2clFdC9BzIELRLSO2uKf0xqjLqlkTJudwtCeaJk,1722
  model_compression_toolkit/core/keras/keras_node_prior_info.py,sha256=f6o5Fmpw0aDrO704_A-SqBrKSO1iNEOyofP9pm3g8yg,3936
  model_compression_toolkit/core/keras/kpi_data_facade.py,sha256=rArrfMtxWGR1P4nhKKxqh6fo7pauRDzkRsZIh_SXxO4,8502
@@ -168,7 +168,7 @@ model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_s
  model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py,sha256=wH9ocMLL725-uUPU-zCxdd8NwT5nyd0ZShmI7iuTwF8,1462
  model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py,sha256=rjIheZW7LbSPv9bzMSmC8wl6UUxaTkd4J2IHinObT-Y,1814
  model_compression_toolkit/core/keras/hessian/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
- model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py,sha256=tj3GtzMY75Wumj2j-teFqlN4EzgUgupCmjtoEVKvpv4,9665
+ model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py,sha256=RL2c7JkvdK37Mgzq2CdSS_1Cg0R1cJ0Gv0Q3cRgV4H4,9642
  model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py,sha256=uwDcC6Fr0H-NKaX0NERRkF6rrnPRbxuo9PsapnuPsRo,3952
  model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py,sha256=SIi7_wChifkulp6aoc-v7v4I1NrQAwvU4ymG2HI66Yk,10478
  model_compression_toolkit/core/keras/mixed_precision/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
@@ -226,7 +226,7 @@ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py,sha256=WmEa8Xjji-_tIbthDxlLAGSr69nWk-YKcHNaVqLa7sg,1375
  model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py,sha256=tp78axmUQc0Zpj3KwVmV0PGYHvCf7sAW_sRmXXw7gsY,1616
  model_compression_toolkit/core/pytorch/hessian/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
- model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py,sha256=ZrBQTpBclUpsOKnmSkgGG1y4Rs1OjBaf93Fcsn6trUg,8340
+ model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py,sha256=6EHwxuOdb57DEBHiYfFODCQHWlUQDArEHLaym19Ml5A,8297
  model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py,sha256=c80LiFbY06mosfZI4yjigiqDsF1ZFdRZ14Xox0kD_94,3480
  model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py,sha256=4TsL-27I672758_B742Zbe-_Ro4OLp7ISxWtWD4oKD8,6899
  model_compression_toolkit/core/pytorch/mixed_precision/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
@@ -282,7 +282,7 @@ model_compression_toolkit/exporter/model_exporter/keras/__init__.py,sha256=uZ2Ri
  model_compression_toolkit/exporter/model_exporter/keras/base_keras_exporter.py,sha256=Are89mV0cPZ11PSHnriWCMJzTLPhughxZNwdEo_aqU8,1690
  model_compression_toolkit/exporter/model_exporter/keras/export_serialization_format.py,sha256=mfc-0Mmajt4fstbwQPWGbeZYb4yexij8Fkv-Vll7sNk,1014
  model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_keras_exporter.py,sha256=vceRGljjaCNwkNg4-Truysx6RsL8DgrKGFCNilY_WfM,11296
- model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=G-PvapFvm4qYrwEbU3DKnwq9-0tN4uDHNAX8g-42IsA,3161
+ model_compression_toolkit/exporter/model_exporter/keras/fakely_quant_tflite_exporter.py,sha256=X19oNMwJes9c7iVEAIzQFQbc8YqTmkDGxqPdGIJCamw,3195
  model_compression_toolkit/exporter/model_exporter/keras/int8_tflite_exporter.py,sha256=nGtpDTeH5Tdp7sjyuXsy_9TPpijDYp4nkz366DUUJ0Q,8048
  model_compression_toolkit/exporter/model_exporter/keras/keras_export_facade.py,sha256=mGaIaMGQ3qVWMEbosyL0uYc-3KsjXumexoaTbeue1X4,5982
  model_compression_toolkit/exporter/model_exporter/pytorch/__init__.py,sha256=uZ2RigbY9O2PJ0Il8wPpS_s7frgg9WUGd_SHeKGyl1A,699
@@ -447,8 +447,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
  model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
  model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=SbvRlIdE32PEBsINt1bhSqvrKL_zbM9V-aeSkOn-sw4,3083
- mct_nightly-1.10.0.20231127.post419.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
- mct_nightly-1.10.0.20231127.post419.dist-info/METADATA,sha256=mheVEF_27VcAd4a2sfCu5iBfDXJxqCCZ0rYWtG6JiJY,16232
- mct_nightly-1.10.0.20231127.post419.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- mct_nightly-1.10.0.20231127.post419.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
- mct_nightly-1.10.0.20231127.post419.dist-info/RECORD,,
+ mct_nightly-1.10.0.20231129.post414.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ mct_nightly-1.10.0.20231129.post414.dist-info/METADATA,sha256=BlcFLeMfxtrdsyKQxgLa5xkorVHJlwwK7qGId7PKegQ,16232
+ mct_nightly-1.10.0.20231129.post414.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ mct_nightly-1.10.0.20231129.post414.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-1.10.0.20231129.post414.dist-info/RECORD,,
@@ -109,3 +109,6 @@ RELU_POT_BOUND = 8.0
  # TFOpLambda functions:
  ADD = 'add'
  PAD = 'pad'
+
+ # Special/Custom layers strings
+ COMBINED_NMS = 'combined_non_max_suppression'
@@ -152,7 +152,7 @@ class ActivationTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
  # Compute the final approximation for each output index
  num_node_outputs = len(interest_point_scores[0])
  for output_idx in range(num_node_outputs):
- final_approx_per_output.append(2 * tf.reduce_mean([x[output_idx] for x in interest_point_scores]) / output.shape[-1])
+ final_approx_per_output.append(tf.reduce_mean([x[output_idx] for x in interest_point_scores]))

  # final_approx_per_output is a list of all approximations (one per output), thus we average them to
  # get the final score of a node.
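The hunk above (and its PyTorch counterpart further down in this diff) changes how an interest point's activation Hessian score is aggregated: the per-output score is now the plain mean of the per-iteration trace estimates, dropping the previous scaling of 2 * mean / output.shape[-1]. The per-output means are then averaged into a single node score, as the unchanged comment lines describe. A minimal sketch of the before/after aggregation, with dummy values standing in for the per-iteration estimates (illustrative only, not the MCT source):

    import tensorflow as tf

    # Hypothetical per-iteration trace estimates collected for one output.
    iteration_scores = [tf.constant(0.8), tf.constant(1.2), tf.constant(1.0)]
    last_dim = 16  # illustrative size of the output's last axis

    old_score = 2 * tf.reduce_mean(iteration_scores) / last_dim  # previous scaling
    new_score = tf.reduce_mean(iteration_scores)                 # new: plain mean

    print(float(old_score), float(new_score))  # 0.125 1.0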
@@ -32,7 +32,8 @@ from model_compression_toolkit.trainable_infrastructure.keras.quantize_wrapper i
  from model_compression_toolkit.core.common.mixed_precision.sensitivity_evaluation import SensitivityEvaluation
  from model_compression_toolkit.core.common.mixed_precision.set_layer_to_bitwidth import set_layer_to_bitwidth
  from model_compression_toolkit.core.common.similarity_analyzer import compute_kl_divergence, compute_cs, compute_mse
- from model_compression_toolkit.core.keras.constants import ACTIVATION, SOFTMAX, SIGMOID, ARGMAX, LAYER_NAME
+ from model_compression_toolkit.core.keras.constants import ACTIVATION, SOFTMAX, SIGMOID, ARGMAX, LAYER_NAME, \
+ COMBINED_NMS
  from model_compression_toolkit.core.keras.graph_substitutions.substitutions.batchnorm_reconstruction import \
  keras_batchnorm_reconstruction
  from model_compression_toolkit.core.keras.graph_substitutions.substitutions.virtual_activation_weights_composition import \
@@ -514,7 +515,8 @@ class KerasImplementation(FrameworkImplementation):

  if node.layer_class == TFOpLambda:
  node_attr = getattr(node, 'framework_attr', None)
- if node_attr is not None and (ARGMAX in node_attr[LAYER_NAME]):
+ if node_attr is not None and (ARGMAX in node_attr[LAYER_NAME]
+ or COMBINED_NMS in node_attr[LAYER_NAME]):
  return False
  elif node.layer_class in [tf.math.argmax]:
  return False
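Combined with the new COMBINED_NMS constant and import, the condition above now also returns False for TFOpLambda nodes whose recorded layer name contains 'combined_non_max_suppression', exactly as it already did for argmax. A small sketch of just the name check (the helper name is illustrative and not part of the package; the surrounding method's purpose is not shown in this hunk):

    ARGMAX = 'argmax'
    COMBINED_NMS = 'combined_non_max_suppression'

    def passes_lambda_name_check(layer_name: str) -> bool:
        # Mirrors the condition in keras_implementation.py: argmax and
        # combined-NMS lambda nodes are excluded.
        return not (ARGMAX in layer_name or COMBINED_NMS in layer_name)

    print(passes_lambda_name_check('tf.image.combined_non_max_suppression'))  # False
    print(passes_lambda_name_check('tf.math.argmax'))                         # False
    print(passes_lambda_name_check('tf.math.add'))                            # True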
@@ -131,8 +131,8 @@ class ActivationTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
  break

  trace_hv.append(hessian_trace_approx)
- ipts_hessian_trace_approx.append(2 * torch.mean(torch.stack(trace_hv)) / output.shape[
- -1])  # Get averaged Hessian trace approximation
+
+ ipts_hessian_trace_approx.append(torch.mean(torch.stack(trace_hv)))  # Get averaged Hessian trace approximation

  # If a node has multiple outputs, it means that multiple approximations were computed
  # (one per output since granularity is per-tensor). In this case we average the approximations.
@@ -57,14 +57,13 @@ class FakelyQuantTFLiteExporter(FakelyQuantKerasExporter):

  """
  # Use Keras exporter to quantize model's weights before converting it to TFLite.
- # Since exporter saves the model, we use a tmp path for saving, and then we delete it.
- _, tmp_file = tempfile.mkstemp(TMP_KERAS_EXPORT_FORMAT)
- custom_objects = FakelyQuantKerasExporter(self.model,
- self.is_layer_exportable_fn,
- tmp_file).export()
+ # Since exporter saves the model, we use a tmp path for saving, and then we delete it automatically.
+ with tempfile.NamedTemporaryFile(suffix=TMP_KERAS_EXPORT_FORMAT) as tmp_file:
+ custom_objects = FakelyQuantKerasExporter(self.model,
+ self.is_layer_exportable_fn,
+ tmp_file.name).export()

- model = keras_load_quantized_model(tmp_file)
- os.remove(tmp_file)
+ model = keras_load_quantized_model(tmp_file.name)

  self.exported_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
  Logger.info(f'Exporting FQ tflite model to: {self.save_model_path}')
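This last hunk replaces a manually managed temporary file (tempfile.mkstemp followed by os.remove) with a tempfile.NamedTemporaryFile context manager, so the intermediate Keras file is cleaned up automatically when the block exits. A standalone sketch of that pattern, using an illustrative suffix rather than the package's TMP_KERAS_EXPORT_FORMAT constant:

    import tempfile

    with tempfile.NamedTemporaryFile(suffix='.h5') as tmp_file:
        # Save and reload an intermediate artifact via tmp_file.name inside the block.
        print('intermediate file lives at', tmp_file.name)
    # On exiting the with-block the file is deleted automatically; no os.remove needed.

One caveat of this pattern: per the Python documentation, a NamedTemporaryFile can be reopened by name while still open on Unix, but not on Windows.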