mct-nightly 1.10.0.20231128.post5436-py3-none-any.whl → 1.10.0.20231129.post414-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mct-nightly
-Version: 1.10.0.20231128.post5436
+Version: 1.10.0.20231129.post414
 Summary: A Model Compression Toolkit for neural networks
 Home-page: UNKNOWN
 License: UNKNOWN
@@ -168,7 +168,7 @@ model_compression_toolkit/core/keras/graph_substitutions/substitutions/softmax_s
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/virtual_activation_weights_composition.py,sha256=wH9ocMLL725-uUPU-zCxdd8NwT5nyd0ZShmI7iuTwF8,1462
 model_compression_toolkit/core/keras/graph_substitutions/substitutions/weights_activation_split.py,sha256=rjIheZW7LbSPv9bzMSmC8wl6UUxaTkd4J2IHinObT-Y,1814
 model_compression_toolkit/core/keras/hessian/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
-model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py,sha256=tj3GtzMY75Wumj2j-teFqlN4EzgUgupCmjtoEVKvpv4,9665
+model_compression_toolkit/core/keras/hessian/activation_trace_hessian_calculator_keras.py,sha256=RL2c7JkvdK37Mgzq2CdSS_1Cg0R1cJ0Gv0Q3cRgV4H4,9642
 model_compression_toolkit/core/keras/hessian/trace_hessian_calculator_keras.py,sha256=uwDcC6Fr0H-NKaX0NERRkF6rrnPRbxuo9PsapnuPsRo,3952
 model_compression_toolkit/core/keras/hessian/weights_trace_hessian_calculator_keras.py,sha256=SIi7_wChifkulp6aoc-v7v4I1NrQAwvU4ymG2HI66Yk,10478
 model_compression_toolkit/core/keras/mixed_precision/__init__.py,sha256=sw7LOPN1bM82o3SkMaklyH0jw-TLGK0-fl2Wq73rffI,697
@@ -226,7 +226,7 @@ model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/softmax
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/virtual_activation_weights_composition.py,sha256=WmEa8Xjji-_tIbthDxlLAGSr69nWk-YKcHNaVqLa7sg,1375
 model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/weights_activation_split.py,sha256=tp78axmUQc0Zpj3KwVmV0PGYHvCf7sAW_sRmXXw7gsY,1616
 model_compression_toolkit/core/pytorch/hessian/__init__.py,sha256=lNJ29DYxaLUPDstRDA1PGI5r9Fulq_hvrZMlhst1Z5g,697
-model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py,sha256=ZrBQTpBclUpsOKnmSkgGG1y4Rs1OjBaf93Fcsn6trUg,8340
+model_compression_toolkit/core/pytorch/hessian/activation_trace_hessian_calculator_pytorch.py,sha256=6EHwxuOdb57DEBHiYfFODCQHWlUQDArEHLaym19Ml5A,8297
 model_compression_toolkit/core/pytorch/hessian/trace_hessian_calculator_pytorch.py,sha256=c80LiFbY06mosfZI4yjigiqDsF1ZFdRZ14Xox0kD_94,3480
 model_compression_toolkit/core/pytorch/hessian/weights_trace_hessian_calculator_pytorch.py,sha256=4TsL-27I672758_B742Zbe-_Ro4OLp7ISxWtWD4oKD8,6899
 model_compression_toolkit/core/pytorch/mixed_precision/__init__.py,sha256=Rf1RcYmelmdZmBV5qOKvKWF575ofc06JFQSq83Jz99A,696
@@ -447,8 +447,8 @@ model_compression_toolkit/trainable_infrastructure/keras/quantize_wrapper.py,sha
 model_compression_toolkit/trainable_infrastructure/keras/quantizer_utils.py,sha256=MVwXNymmFRB2NXIBx4e2mdJ1RfoHxRPYRgjb1MQP5kY,1797
 model_compression_toolkit/trainable_infrastructure/pytorch/__init__.py,sha256=huHoBUcKNB6BnY6YaUCcFvdyBtBI172ZoUD8ZYeNc6o,696
 model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py,sha256=SbvRlIdE32PEBsINt1bhSqvrKL_zbM9V-aeSkOn-sw4,3083
-mct_nightly-1.10.0.20231128.post5436.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
-mct_nightly-1.10.0.20231128.post5436.dist-info/METADATA,sha256=xgYuHWMlREV_QtNvSqQmQD82x8tSx3Ht5SlNk8MoY6A,16233
-mct_nightly-1.10.0.20231128.post5436.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-mct_nightly-1.10.0.20231128.post5436.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
-mct_nightly-1.10.0.20231128.post5436.dist-info/RECORD,,
+mct_nightly-1.10.0.20231129.post414.dist-info/LICENSE.md,sha256=aYSSIb-5AFPeITTvXm1UAoe0uYBiMmSS8flvXaaFUks,10174
+mct_nightly-1.10.0.20231129.post414.dist-info/METADATA,sha256=BlcFLeMfxtrdsyKQxgLa5xkorVHJlwwK7qGId7PKegQ,16232
+mct_nightly-1.10.0.20231129.post414.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+mct_nightly-1.10.0.20231129.post414.dist-info/top_level.txt,sha256=gsYA8juk0Z-ZmQRKULkb3JLGdOdz8jW_cMRjisn9ga4,26
+mct_nightly-1.10.0.20231129.post414.dist-info/RECORD,,
@@ -152,7 +152,7 @@ class ActivationTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
             # Compute the final approximation for each output index
             num_node_outputs = len(interest_point_scores[0])
             for output_idx in range(num_node_outputs):
-                final_approx_per_output.append(2 * tf.reduce_mean([x[output_idx] for x in interest_point_scores]) / output.shape[-1])
+                final_approx_per_output.append(tf.reduce_mean([x[output_idx] for x in interest_point_scores]))
 
             # final_approx_per_output is a list of all approximations (one per output), thus we average them to
             # get the final score of a node.
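The substantive change in this release is the same in both backends: the per-output activation score drops the 2 / output.shape[-1] normalization and becomes the plain mean of the Hutchinson samples. A minimal TF sketch of the before/after, assuming interest_point_scores[i][j] holds the i-th sample for output tensor j as in the hunk above (the helper names are hypothetical, not MCT API):

import tensorflow as tf

def node_score_new(interest_point_scores, output_idx):
    # 1.10.0.20231129 behavior: plain mean of the Hutchinson samples.
    return tf.reduce_mean([x[output_idx] for x in interest_point_scores])

def node_score_old(interest_point_scores, output_idx, d):
    # 1.10.0.20231128 behavior: the same mean, scaled by 2 and divided by
    # d, the size of the output's last axis.
    return 2.0 * node_score_new(interest_point_scores, output_idx) / d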
@@ -131,8 +131,8 @@ class ActivationTraceHessianCalculatorPytorch(TraceHessianCalculatorPytorch):
                         break
 
                     trace_hv.append(hessian_trace_approx)
-                ipts_hessian_trace_approx.append(2 * torch.mean(torch.stack(trace_hv)) / output.shape[
-                    -1])  # Get averaged Hessian trace approximation
+
+                ipts_hessian_trace_approx.append(torch.mean(torch.stack(trace_hv)))  # Get averaged Hessian trace approximation
 
                 # If a node has multiple outputs, it means that multiple approximations were computed
                 # (one per output since granularity is per-tensor). In this case we average the approximations.
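For context on where that mean comes from: Hutchinson's estimator approximates tr(H) by the expectation of v^T H v over random vectors v with E[v v^T] = I, so averaging the samples (what the new code keeps) is already a trace estimate; the removed 2 / output.shape[-1] factor was an additional normalization by the output width. A self-contained toy sketch of the estimator on a small explicit matrix (illustrative only, not the MCT implementation):

import torch

def hutchinson_trace(H: torch.Tensor, n_samples: int = 1000) -> torch.Tensor:
    # Estimate tr(H) as the mean of v^T H v over Rademacher vectors v
    # (entries in {-1, +1}), which satisfy E[v v^T] = I.
    d = H.shape[0]
    samples = []
    for _ in range(n_samples):
        v = torch.randint(0, 2, (d,)).to(H.dtype) * 2 - 1
        samples.append(v @ H @ v)
    return torch.mean(torch.stack(samples))  # mirrors the new averaging above

H = torch.diag(torch.tensor([1.0, 2.0, 3.0]))
print(hutchinson_trace(H))  # approximately 6.0 == tr(H)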