mct-nightly 1.11.0.20240125.post413__py3-none-any.whl → 1.11.0.20240127.post408__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mct-nightly
- Version: 1.11.0.20240125.post413
+ Version: 1.11.0.20240127.post408
  Summary: A Model Compression Toolkit for neural networks
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -10,7 +10,7 @@ model_compression_toolkit/core/runner.py,sha256=RgN9l0v7aFYu6MTuIZGAB2syr6NBqG_v
  model_compression_toolkit/core/common/__init__.py,sha256=Wh127PbXcETZX_d1PQqZ71ETK3J9XO5A-HpadGUbj6o,1447
  model_compression_toolkit/core/common/base_substitutions.py,sha256=xDFSmVVs_iFSZfajytI0cuQaNRNcwHX3uqOoHgVUvxQ,1666
  model_compression_toolkit/core/common/data_loader.py,sha256=7YF5Mqz64Xb4rVwY3knrdIZ4JEHybXxiQqx0deR_c5k,4017
- model_compression_toolkit/core/common/defaultdict.py,sha256=P2WOZbWQTfVKtMfpGhGOS_1_5YWfYQWiJ5pBCn6F-3k,2182
+ model_compression_toolkit/core/common/defaultdict.py,sha256=LSc-sbZYXENMCw3U9F4GiXuv67IKpdn0Qm7Fr11jy-4,2277
  model_compression_toolkit/core/common/framework_implementation.py,sha256=XzRiDWi-pXfR8MhTbC3vi2Uaa6pC4BeS0YnyWpqNkNU,21220
  model_compression_toolkit/core/common/framework_info.py,sha256=hwmstv7IuBRfa6IxDbeG4y-7AxKx4bwCyI_Exi2C7mo,6424
  model_compression_toolkit/core/common/memory_computation.py,sha256=ixoSpV5ZYZGyzhre3kQcvR2sNA8KBsPZ3lgbkDnw9Cs,1205
@@ -120,7 +120,7 @@ model_compression_toolkit/core/common/quantization/quantization_params_generatio
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py,sha256=noEdvGiyyW7acgQ2OFWLedCODibTGYJifC9qo8YIU5U,4558
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_computation.py,sha256=MR17AECmtQSlmD3emYDM7FYQdObnfl9x_bg3NSIh3fY,4628
  model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_search.py,sha256=3gUOfFRxJBC1AIXa6j1O4Y7DLuDZrygbxOsVyetYzuw,41685
- model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py,sha256=q_FOXCzUHps5OLsysvvuL8w390lYgyZvOqXp618O3QA,5082
+ model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_weights_computation.py,sha256=s2m09heGcj7DyHpFDyogAXrvk06ZA3Z8hEq41iRNaBg,5092
  model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py,sha256=53OFL3IZxtH-UPakf3h_LZkaZAa3cgc2oqgMUe3Sg8o,9689
  model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py,sha256=oiJn1twYpTaq_z5qX4d8_nnk_jouYWHq8z0WAknl5oE,7879
  model_compression_toolkit/core/common/quantization/quantizers/__init__.py,sha256=mjbqLD-KcG3eNeCYpu1GBS7VclGVOQ63x2p6mAAuba4,698
@@ -354,7 +354,7 @@ model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=BBSDWLmeywjSM5N6oJkMgcuo7zrXTesB4zLwRGG8QB0,12159
  model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=pyhlVpoauHM-zuixHsIGPHFgQoXppL8TlDFCjPE2RuY,10377
  model_compression_toolkit/gptq/keras/quantizer/ste_rounding/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=K0hP5Kz__6e2Y_DGZfYIGsNqme9kDR8ps4awBtIDZng,8358
+ model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py,sha256=nI1jxpNEaCxSuwEd29Oobr-p8htKMj5wYcg52aKzDOQ,8368
  model_compression_toolkit/gptq/pytorch/__init__.py,sha256=cco4TmeIDIh32nj9ZZXVkws4dd9F2UDrmjKzTN8G0V0,697
  model_compression_toolkit/gptq/pytorch/gptq_loss.py,sha256=kDuWw-6zh17wZpYWh4Xa94rpoodf82DksgjQCnL7nBc,2719
  model_compression_toolkit/gptq/pytorch/gptq_pytorch_implementation.py,sha256=tECPTavxn8EEwgLaP2zvxdJH6Vg9jC0YOIMJ7857Sdc,1268
@@ -371,7 +371,7 @@ model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_re
  model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py,sha256=kLVQC1hXzDpP4Jx7AwnA764oGnY5AMEuvUUhAvhz09M,12347
  model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py,sha256=FgPSKoV8p8y-gLNz359XdOPD6w_wpDvcJFtTNLWqYb0,9099
  model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=whVx94NCmYzobvZtPNr4qZTCN-CV8jfs4des_mkK3F8,8770
+ model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py,sha256=vJHjrMNEe3Yh7NrPpUS81UMcMTW8dHwoHfqnQgwqbHM,8780
  model_compression_toolkit/legacy/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
  model_compression_toolkit/legacy/keras_quantization_facade.py,sha256=2pNJoc1mKMbikBS_uebLgFAbTqfA0y9ofDUNCVogSKI,18444
  model_compression_toolkit/legacy/pytorch_quantization_facade.py,sha256=p-ZGKdGeRIJsR5XmFYgjs3VN49NrwHumNtTY2OSDW-4,17874
@@ -475,8 +475,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
  model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
  model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
  model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=SbvRlIdE32PEBsINt1bhSqvrKL_zbM9V-aeSkOn-sw4,3083
- mct_nightly-1.11.0.20240125.post413.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
- mct_nightly-1.11.0.20240125.post413.dist-info/METADATA,sha256=OFuXZWvxyJrgZ6LA2jsoSimSswiQJRlab_F1VL_jTp0,17141
- mct_nightly-1.11.0.20240125.post413.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- mct_nightly-1.11.0.20240125.post413.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
- mct_nightly-1.11.0.20240125.post413.dist-info/RECORD,,
+ mct_nightly-1.11.0.20240127.post408.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+ mct_nightly-1.11.0.20240127.post408.dist-info/METADATA,sha256=mgf6GuZf0ZPUvZMImxM-wqKQx-x9frAw5sVpr70vPyU,17141
+ mct_nightly-1.11.0.20240127.post408.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ mct_nightly-1.11.0.20240127.post408.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+ mct_nightly-1.11.0.20240127.post408.dist-info/RECORD,,
@@ -26,17 +26,17 @@ class DefaultDict:
      """
  
      def __init__(self,
-                  known_dict: Dict[Any, Any],
+                  known_dict: Dict[Any, Any] = None,
                   default_value: Any = None):
          """
  
          Args:
-             known_dict: Dictionary to wrap.
+             known_dict: Dictionary to wrap. If None is provided, initializes an empty dictionary.
              default_value: default value when requested key is not in known_dict.
          """
  
          self.default_value = default_value
-         self.known_dict = known_dict
+         self.known_dict = known_dict if known_dict is not None else {}
  
      def get(self, key: Any) -> Any:
          """
@@ -24,7 +24,7 @@ from model_compression_toolkit.core.common.quantization.node_quantization_config
  
  # If the quantization config does not contain kernel channel mapping or the weights
  # quantization is not per-channel, we use a dummy channel mapping.
- dummy_channel_mapping = DefaultDict({}, (None, None))
+ dummy_channel_mapping = DefaultDict(default_value=(None, None))
  
  
  def get_weights_qparams(kernel: np.ndarray,
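
With known_dict now optional, the dummy channel mapping no longer needs the explicit empty dict; both the old and the new call should return (None, None) for every lookup. A hedged equivalence sketch follows; the layer name is illustrative only.

from model_compression_toolkit.core.common.defaultdict import DefaultDict

# Old call site: the empty dict was passed only to reach the default value.
old_mapping = DefaultDict({}, (None, None))
# New call site: rely on known_dict defaulting to an empty dict.
new_mapping = DefaultDict(default_value=(None, None))

# Any lookup misses the (empty) known_dict, so both yield the dummy mapping.
print(old_mapping.get('conv2d'))   # expected: (None, None)
print(new_mapping.get('conv2d'))   # expected: (None, None)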
@@ -77,7 +77,7 @@ class STEWeightGPTQQuantizer(BaseKerasGPTQTrainableQuantizer):
  
      def __init__(self,
                   quantization_config: TrainableQuantizerWeightsConfig,
-                  max_lsbs_change_map: dict = DefaultDict({}, 1)):
+                  max_lsbs_change_map: dict = DefaultDict(default_value=1)):
          """
          Initialize a STEWeightGPTQQuantizer object with parameters to use for the quantization.
  
@@ -84,7 +84,7 @@ class STEWeightGPTQQuantizer(BasePytorchGPTQTrainableQuantizer):
  
      def __init__(self,
                   quantization_config: TrainableQuantizerWeightsConfig,
-                  max_lsbs_change_map: dict = DefaultDict({}, 1)):
+                  max_lsbs_change_map: dict = DefaultDict(default_value=1)):
          """
          Construct a Pytorch model that utilize a fake weight quantizer of STE (Straight Through Estimator) for symmetric quantizer.
  
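
The Keras and PyTorch STEWeightGPTQQuantizer defaults follow the same pattern: max_lsbs_change_map keeps its fallback value of 1 but drops the explicit empty dict. A hedged sketch of the default versus a caller-supplied map follows; the layer names are hypothetical, and passing a custom DefaultDict is assumed to remain supported since the parameter type is unchanged.

from model_compression_toolkit.core.common.defaultdict import DefaultDict

# Default argument after the change: any layer not listed falls back to 1.
default_map = DefaultDict(default_value=1)
print(default_map.get('any_layer'))   # expected: 1

# A caller-supplied per-layer map; unlisted layers still use the fallback.
custom_map = DefaultDict({'dense_1': 2}, default_value=1)
print(custom_map.get('dense_1'))      # expected: 2
print(custom_map.get('conv_3'))       # expected: 1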