mct-nightly 2.2.0.20250107.164940__py3-none-any.whl → 2.2.0.20250108.523__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. The information is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 2.2.0.20250107.164940
+Version: 2.2.0.20250108.523
 Summary: A Model Compression Toolkit for neural networks
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: Apache Software License
@@ -1,4 +1,4 @@
-model_compression_toolkit/__init__.py,sha256=A5js8ho45WYpU_ZDFPmwshjYKWICZlIgdFrShfdEJGo,1573
+model_compression_toolkit/__init__.py,sha256=T9vJvHgnVfRm0QlXM_rI0gN4vrj4U14twllXd5p0Irs,1573
 model_compression_toolkit/constants.py,sha256=i_R6uXBfO1ph_X6DNJych2x59SUojfJbn7dNjs_mZnc,3846
 model_compression_toolkit/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
 model_compression_toolkit/logger.py,sha256=3DByV41XHRR3kLTJNbpaMmikL8icd9e1N-nkQAY9oDk,4567
@@ -361,10 +361,10 @@ model_compression_toolkit/gptq/common/gradual_activation_quantization.py,sha256=
 model_compression_toolkit/gptq/common/regularization_factory.py,sha256=hyunpXepVeHyoAFJw6zNLK-3ZHBmiut3lmNisJN_L3E,2514
 model_compression_toolkit/gptq/keras/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
 model_compression_toolkit/gptq/keras/gptq_keras_implementation.py,sha256=axBwnCSjq5xk-xGymOwSOqjp39It-CVtGcCTRTf0E_4,1248
-model_compression_toolkit/gptq/keras/gptq_loss.py,sha256=k5s7D4CTqbYCHgydyevw1c2p3S2TZCECHNvK79QGE2U,7797
+model_compression_toolkit/gptq/keras/gptq_loss.py,sha256=2hzWzsbuVd5XcL85NM57YeOyHxRY0qMArKn8NvQ1UWw,7643
 model_compression_toolkit/gptq/keras/gptq_training.py,sha256=0WGiP7Gs4xX3FBs1PNaZ7w3hWRigwQXqYjBrs_-x32o,23241
 model_compression_toolkit/gptq/keras/graph_info.py,sha256=zwoeHX67nJJ5-zYLjzvMXS9TLsy9BsizARbZiDVjVSA,4473
-model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=jUAjkIszziedftaQBSmjEL6tYEYpHhlFpSgw2X9OTf4,18672
+model_compression_toolkit/gptq/keras/quantization_facade.py,sha256=meRKqpzZe2Irf21L_rN_mkr5dqPTJHzfSFBeqv4Csp4,18536
 model_compression_toolkit/gptq/keras/quantizer/__init__.py,sha256=-DK1CDXvlsnEbki4lukZLpl6Xrbo91_jcqxXlG5Eg6Q,963
 model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py,sha256=Rbl9urzkmACvVxICSEyJ02qFOBxWK0UQWtysFJzBVZw,4899
 model_compression_toolkit/gptq/keras/quantizer/quant_utils.py,sha256=Vt7Qb8i4JsE4sFtcjpfM4FTXTtfV1t6SwfoNH8a_Iaw,5055
@@ -380,7 +380,7 @@ model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=_07Zx_43bnNokwR5S8phI
 model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py,sha256=tECPTavxn8EEwgLaP2zvxdJH6Vg9jC0YOIMJ7857Sdc,1268
 model_compression_toolkit/gptq/pytorch/gptq_training.py,sha256=WtehnyiYXdUXf8-uNpV0mdsalF7YF7eKnL7tcFrzZoE,19549
 model_compression_toolkit/gptq/pytorch/graph_info.py,sha256=4mVM-VvnBaA64ACVdOe6wTGHdMSa2UTLIUe7nACLcdo,4008
-model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=HSFpx6JgjxGhU-0jA0z85sOOgSjCq6gzDOSkmuksZVE,16713
+model_compression_toolkit/gptq/pytorch/quantization_facade.py,sha256=kMSq9mrpcgMBRgrEKfMBHaJG6HhGRYnuiDzF4ofckwo,16581
 model_compression_toolkit/gptq/pytorch/quantizer/__init__.py,sha256=ZHNHo1yzye44m9_ht4UUZfTpK01RiVR3Tr74-vtnOGI,968
 model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py,sha256=fKg-PNOhGBiL-4eySS9Fyw0GkA76Pq8jT_HbJuJ8iZU,4143
 model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py,sha256=OocYYRqvl7rZ37QT0hTzfJnWGiNCPskg7cziTlR7TRk,3893
@@ -525,8 +525,8 @@ model_compression_toolkit/xquant/pytorch/model_analyzer.py,sha256=b93o800yVB3Z-i
 model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py,sha256=3jNiV5Z4BVw9cEWuLKNOlLuLdr0EMuKg6eYnSiAq3LU,3952
 model_compression_toolkit/xquant/pytorch/similarity_functions.py,sha256=CERxq5K8rqaiE-DlwhZBTUd9x69dtYJlkHOPLB54vm8,2354
 model_compression_toolkit/xquant/pytorch/tensorboard_utils.py,sha256=mkoEktLFFHtEKzzFRn_jCnxjhJolK12TZ5AQeDHzUO8,9767
-mct_nightly-2.2.0.20250107.164940.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
-mct_nightly-2.2.0.20250107.164940.dist-info/METADATA,sha256=9IxOhXh5nRY6Ck-0wINZ4huD8os58zC-X_15pdsxHCo,26464
-mct_nightly-2.2.0.20250107.164940.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
-mct_nightly-2.2.0.20250107.164940.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
-mct_nightly-2.2.0.20250107.164940.dist-info/RECORD,,
+mct_nightly-2.2.0.20250108.523.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-2.2.0.20250108.523.dist-info/METADATA,sha256=riYLks2VpIMjq7W0UIbOGVmX68cYfNGXjl04SFFNSnE,26461
+mct_nightly-2.2.0.20250108.523.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+mct_nightly-2.2.0.20250108.523.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-2.2.0.20250108.523.dist-info/RECORD,,
@@ -27,4 +27,4 @@ from model_compression_toolkit import data_generation
 from model_compression_toolkit import pruning
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import keras_load_quantized_model
 
-__version__ = "2.2.0.20250107.164940"
+__version__ = "2.2.0.20250108.000523"
@@ -144,9 +144,8 @@ def activation_mse(flp_act_list,
         loss_values_list.append(point_loss)
         bias_loss_list.append(bias_loss)
     if weights_for_average_loss is not None:
-        print(f"weights_for_average_loss.shape: {weights_for_average_loss.shape}")
-        print(f"tf.stack(loss_values_list).shape: {tf.stack(loss_values_list).shape}")
-        return tf.reduce_sum(weights_for_average_loss * tf.stack(loss_values_list)), tf.reduce_mean(tf.stack(bias_loss_list))
+        return tf.reduce_sum(weights_for_average_loss * tf.stack(loss_values_list)), \
+               tf.reduce_mean(tf.stack(bias_loss_list))
     else:
         return tf.reduce_mean(tf.stack(loss_values_list)), tf.reduce_mean(tf.stack(bias_loss_list))
 
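Note on the hunk above: the only functional change in activation_mse (keras gptq_loss.py) is the removal of the two debug print calls; the weighted branch still returns a weighted sum over the stacked per-layer losses alongside a mean over the bias losses, now just wrapped onto two lines. A minimal, self-contained sketch of that reduction, with made-up tensor values standing in for the real per-layer losses (names mirror the diff, data is illustrative only):

    import tensorflow as tf

    # Illustrative per-layer loss values and per-layer weights (not real package data).
    loss_values_list = [tf.constant(0.10), tf.constant(0.25), tf.constant(0.05)]
    bias_loss_list = [tf.constant(0.02), tf.constant(0.01), tf.constant(0.03)]
    weights_for_average_loss = tf.constant([0.5, 0.3, 0.2])

    # Weighted branch: weighted sum over the stacked per-layer losses,
    # paired with a plain mean over the bias losses.
    weighted = (tf.reduce_sum(weights_for_average_loss * tf.stack(loss_values_list)),
                tf.reduce_mean(tf.stack(bias_loss_list)))

    # Unweighted branch (the else path): plain means of both lists.
    unweighted = (tf.reduce_mean(tf.stack(loss_values_list)),
                  tf.reduce_mean(tf.stack(bias_loss_list)))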
@@ -188,4 +187,4 @@ class GPTQMultipleTensorsLoss:
                                   weights_for_average_loss=weights_for_average_loss,
                                   norm_loss=self.norm_loss)
 
-        return loss_act
+        return loss_act
@@ -115,6 +115,7 @@ if FOUND_TF:
         if regularization_factor is None:
             regularization_factor = REG_DEFAULT_SLA if use_hessian_sample_attention else REG_DEFAULT
 
+        loss = loss or GPTQMultipleTensorsLoss()
         hessian_weights_config = None
         if use_hessian_sample_attention:
             if not use_hessian_based_weights: # pragma: no cover
@@ -128,10 +129,7 @@ if FOUND_TF:
             hessian_weights_config = GPTQHessianScoresConfig(per_sample=False,
                                                              hessians_num_samples=GPTQ_HESSIAN_NUM_SAMPLES,
                                                              hessian_batch_size=hessian_batch_size)
-
-        # If a loss was not passed (and was not initialized due to use_hessian_sample_attention), use the default loss
-        loss = loss or GPTQMultipleTensorsLoss()
-
+
         if isinstance(gradual_activation_quantization, bool):
             gradual_quant_config = GradualActivationQuantizationConfig() if gradual_activation_quantization else None
         elif isinstance(gradual_activation_quantization, GradualActivationQuantizationConfig):
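Note on the two if FOUND_TF hunks above: the fallback loss = loss or GPTQMultipleTensorsLoss() moves from after the Hessian-scores branch (where it carried an explanatory comment, now dropped) to just before it; the mirrored change for the PyTorch facade with multiple_tensors_mse_loss follows in the last two hunks. The fallback itself is the ordinary short-circuit-default idiom, sketched below with a hypothetical stand-in class rather than the package's real loss:

    # Minimal sketch of the `x = x or default` fallback used in both facades.
    # DefaultLoss is a hypothetical stand-in for GPTQMultipleTensorsLoss.
    class DefaultLoss:
        def __call__(self, *args, **kwargs):
            return 0.0  # placeholder loss value

    def resolve_loss(loss=None):
        # Any falsy value (None in practice) is replaced by a default instance.
        return loss or DefaultLoss()

    custom_loss = lambda *args, **kwargs: 1.0
    assert isinstance(resolve_loss(), DefaultLoss)   # no loss passed -> default is used
    assert resolve_loss(custom_loss) is custom_loss  # an explicit loss is kept as-is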
@@ -104,6 +104,7 @@ if FOUND_TORCH:
         if regularization_factor is None:
             regularization_factor = REG_DEFAULT_SLA if use_hessian_sample_attention else REG_DEFAULT
 
+        loss = loss or multiple_tensors_mse_loss
         hessian_weights_config = None
         if use_hessian_sample_attention:
             if not use_hessian_based_weights: # pragma: no cover
@@ -117,9 +118,6 @@ if FOUND_TORCH:
             hessian_weights_config = GPTQHessianScoresConfig(per_sample=False,
                                                              hessians_num_samples=GPTQ_HESSIAN_NUM_SAMPLES,
                                                              hessian_batch_size=hessian_batch_size)
-
-        # If a loss was not passed (and was not initialized due to use_hessian_sample_attention), use the default loss
-        loss = loss or multiple_tensors_mse_loss
 
         if isinstance(gradual_activation_quantization, bool):
             gradual_quant_config = GradualActivationQuantizationConfig() if gradual_activation_quantization else None
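Closing note: because both facades now bind their default loss before the Hessian-weights branch, a caller that omits the loss argument still receives the framework-appropriate default. A hypothetical usage sketch, assuming the public helpers are mct.gptq.get_keras_gptq_config and mct.gptq.get_pytorch_gptq_config with an n_epochs argument as in the MCT documentation (not verified against this exact nightly):

    import model_compression_toolkit as mct

    # loss is omitted on purpose: the facade falls back to its default
    # (GPTQMultipleTensorsLoss for Keras, multiple_tensors_mse_loss for PyTorch).
    keras_gptq_cfg = mct.gptq.get_keras_gptq_config(n_epochs=5)
    pytorch_gptq_cfg = mct.gptq.get_pytorch_gptq_config(n_epochs=5)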