qadence 1.11.3__py3-none-any.whl → 1.11.5__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- qadence/ml_tools/tensors.py +9 -0
- qadence/ml_tools/trainer.py +5 -2
- qadence/states.py +129 -0
- {qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/METADATA +13 -3
- {qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/RECORD +7 -7
- {qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/WHEEL +0 -0
- {qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/licenses/LICENSE +0 -0
qadence/ml_tools/tensors.py
CHANGED

```diff
@@ -39,3 +39,12 @@ def promote_to(x: Tensor, dtype: Any) -> float | np.ndarray | Tensor:
         return x
     else:
         raise ValueError(f"Don't know how to convert Tensor to {dtype}")
+
+
+def detach_loss_metrics(
+    loss_metrics: tuple[torch.Tensor, dict[str, Any]],
+) -> tuple[torch.Tensor, dict[str, Any]]:
+    """Detach the tensors contained in the loss and metrics history."""
+    loss, metrics = loss_metrics
+    detached_metrics = {k: v.detach() if isinstance(v, Tensor) else v for k, v in metrics.items()}
+    return loss.detach(), detached_metrics
```
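The new helper detaches the loss tensor and every tensor-valued metric so that stored histories do not keep autograd graphs alive. A minimal usage sketch (the loss and metric values here are illustrative; only `detach_loss_metrics` comes from the diff above):

```python
import torch

from qadence.ml_tools.tensors import detach_loss_metrics

# Illustrative loss/metrics pair; in training these come from the loss function.
pred = torch.rand(4, requires_grad=True)
loss = (pred**2).mean()
metrics = {"mse": loss, "n_samples": 4}  # non-tensor values pass through unchanged

loss, metrics = detach_loss_metrics((loss, metrics))
assert not loss.requires_grad
assert not metrics["mse"].requires_grad
assert metrics["n_samples"] == 4
```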
qadence/ml_tools/trainer.py
CHANGED

```diff
@@ -15,6 +15,7 @@ from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
 from qadence.ml_tools.information import InformationContent
 from qadence.ml_tools.optimize_step import optimize_step, update_ng_parameters
 from qadence.ml_tools.stages import TrainingStage
+from qadence.ml_tools.tensors import detach_loss_metrics

 from .train_utils.base_trainer import BaseTrainer
 from .train_utils.accelerator import Accelerator
@@ -654,6 +655,8 @@ class Trainer(BaseTrainer):
         All metrics are prefixed with the proper state of the training process
         - "train_" or "val_" or "test_"
         A "{state}_loss" is added to metrics.
+        In order to save memory, all tensors are detached so that gradients
+        are not tracked.

         Args:
             loss_metrics (tuple[torch.Tensor, dict[str, Any]]): Original loss and metrics.
@@ -666,8 +669,8 @@ class Trainer(BaseTrainer):
         loss, metrics = loss_metrics
         updated_metrics = {f"{phase}_{key}": value for key, value in metrics.items()}
         updated_metrics[f"{phase}_loss"] = loss
-
-        return loss_metrics
+        loss_metrics = (loss, updated_metrics)
+        return detach_loss_metrics(loss_metrics)

     def _aggregate_result(
         self, result: tuple[torch.Tensor, dict[str, Any]]
```
qadence/states.py
CHANGED

```diff
@@ -41,6 +41,10 @@ __all__ = [
     "DensityMatrix",
     "density_mat",
     "overlap",
+    "partial_trace",
+    "von_neumann_entropy",
+    "purity",
+    "fidelity",
 ]

 ATOL_64 = 1e-14  # 64 bit precision
@@ -589,3 +593,128 @@ def equivalent_state(
     fidelity = overlap(s0, s1)
     expected = torch.ones_like(fidelity)
     return torch.allclose(fidelity, expected, rtol=rtol, atol=atol)  # type: ignore[no-any-return]
+
+
+# DensityMatrix utility functions
+
+
+def partial_trace(rho: DensityMatrix, keep_indices: list[int]) -> DensityMatrix:
+    """
+    Compute the partial trace of a density matrix for a system of several qubits with batch size.
+
+    This function also permutes qubits according to the order specified in keep_indices.
+
+    Args:
+        rho (DensityMatrix): Density matrix of shape [batch_size, 2**n_qubits, 2**n_qubits].
+        keep_indices (list[int]): Index of the qubit subsystems to keep.
+
+    Returns:
+        DensityMatrix: Reduced density matrix after the partial trace,
+            of shape [batch_size, 2**n_keep, 2**n_keep].
+    """
+    from pyqtorch.utils import dm_partial_trace
+
+    return dm_partial_trace(rho.permute((1, 2, 0)), keep_indices).permute((2, 0, 1))
+
+
+def von_neumann_entropy(rho: DensityMatrix, eps: float = 1e-12) -> torch.Tensor:
+    """Calculate the von Neumann entropy of a quantum density matrix.
+
+    The von Neumann entropy is defined as S(ρ) = -Tr(ρ log₂ ρ) = -∑ᵢ λᵢ log₂ λᵢ,
+    where λᵢ are the eigenvalues of ρ.
+
+    Args:
+        rho: Density matrix of shape [batch_size, dim, dim]
+        eps: Small value to avoid log(0) for zero eigenvalues
+
+    Returns:
+        Von Neumann entropy for each density matrix in the batch, shape [batch_size]
+    """
+
+    # Compute eigenvalues for each density matrix in the batch
+    # For a Hermitian density matrix, eigenvalues should be real and non-negative
+    eigenvalues = torch.linalg.eigvalsh(rho)
+
+    # Normalize eigenvalues to ensure they sum to 1 (trace preservation)
+    # This step might be redundant but helps with numerical stability
+    eigenvalues = eigenvalues / torch.sum(eigenvalues, dim=1, keepdim=True)
+
+    # Filter out very small eigenvalues to avoid numerical issues
+    valid_eigenvalues = eigenvalues.clone()
+    valid_eigenvalues[valid_eigenvalues < eps] = eps
+
+    # Compute the entropy: -∑ᵢ λᵢ log₂ λᵢ
+    # Using natural logarithm and converting to base 2
+    log_base_conversion = torch.log(torch.tensor(2.0, device=rho.device))
+    entropy = -torch.sum(
+        valid_eigenvalues * torch.log(valid_eigenvalues) / log_base_conversion, dim=1
+    )
+
+    return entropy
+
+
+def purity(rho: DensityMatrix, order: int = 2) -> Tensor:
+    """Compute the n-th purity of a density matrix.
+
+    Args:
+        rho (DensityMatrix): Density matrix.
+        order (int, optional): Exponent n.
+
+    Returns:
+        Tensor: Tr[rho ** n]
+    """
+    # Compute eigenvalues
+    eigenvalues = torch.linalg.eigvalsh(rho)
+
+    # Compute the sum of eigenvalues raised to power n
+    return torch.sum(eigenvalues**order, dim=1)
+
+
+def fidelity(rho: DensityMatrix, sigma: DensityMatrix) -> Tensor:
+    """Calculate the fidelity between two quantum states represented by density matrices.
+
+    The fidelity is defined as F(ρ,σ) = Tr[√(√ρ σ √ρ)], or equivalently,
+    F(ρ,σ) = ||√ρ·√σ||₁ where ||·||₁ is the trace norm.
+
+    Args:
+        rho: First density matrix of shape [batch_size, dim, dim]
+        sigma: Second density matrix of shape [batch_size, dim, dim]
+
+    Returns:
+        Fidelity between each pair of density matrices in the batch, shape [batch_size]
+    """
+
+    # Compute square root of rho
+    rho_eigvals, rho_eigvecs = torch.linalg.eigh(rho)
+
+    # Ensure non-negative eigenvalues
+    rho_eigvals = torch.clamp(rho_eigvals, min=0)
+
+    # Compute square root using eigendecomposition
+    sqrt_eigvals = torch.sqrt(rho_eigvals)
+
+    # Compute √ρ for each batch element
+    sqrt_rho = torch.zeros_like(rho)
+    for i in range(rho.shape[0]):
+        sqrt_rho[i] = torch.mm(
+            rho_eigvecs[i],
+            torch.mm(
+                torch.diag(sqrt_eigvals[i]).to(dtype=rho_eigvecs.dtype), rho_eigvecs[i].t().conj()
+            ),
+        )
+
+    # Compute √ρ σ √ρ for each batch element
+    inner_product = torch.zeros_like(rho)
+    for i in range(rho.shape[0]):
+        inner_product[i] = torch.mm(sqrt_rho[i], torch.mm(sigma[i], sqrt_rho[i]))
+
+    # Compute eigenvalues of inner product
+    inner_eigvals = torch.linalg.eigvalsh(inner_product)
+
+    # Ensure non-negative eigenvalues
+    inner_eigvals = torch.clamp(inner_eigvals, min=0)
+
+    # Compute the fidelity as the sum of the square roots of eigenvalues
+    fidelity_values = torch.sum(torch.sqrt(inner_eigvals), dim=1)
+
+    return fidelity_values
```
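A short sanity check of the new utilities on a Bell state. This usage is illustrative: `density_mat` is the existing state-vector-to-density-matrix helper exported from the same module, and the printed values are the standard textbook results:

```python
import torch

from qadence.states import density_mat, partial_trace, purity, von_neumann_entropy

# Bell state |Φ+> = (|00> + |11>)/√2, batched with batch_size = 1.
phi = torch.zeros(1, 4, dtype=torch.cdouble)
phi[0, 0] = phi[0, 3] = 2**-0.5
rho = density_mat(phi)  # DensityMatrix of shape [1, 4, 4]

# Trace out qubit 1, keeping qubit 0.
rho_a = partial_trace(rho, keep_indices=[0])  # shape [1, 2, 2]

print(purity(rho))                 # ~1.0: the global state is pure
print(purity(rho_a))               # ~0.5: the marginal is maximally mixed
print(von_neumann_entropy(rho_a))  # ~1.0: one bit of entanglement entropy
```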
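`fidelity` can be checked the same way on pure states; this sketch assumes the `zero_state`/`one_state` constructors already provided by qadence.states:

```python
from qadence.states import density_mat, fidelity, one_state, zero_state

rho0 = density_mat(zero_state(1))  # |0><0|, shape [1, 2, 2]
rho1 = density_mat(one_state(1))   # |1><1|

print(fidelity(rho0, rho0))  # ~1.0 for identical states
print(fidelity(rho0, rho1))  # ~0.0 for orthogonal states
```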
{qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/METADATA
CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.11.3
+Version: 1.11.5
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>, Sungwoo Ahn <sungwoo.ahn@pasqal.com>
 License: PASQAL OPEN-SOURCE SOFTWARE LICENSE (MIT-derived)
@@ -24,7 +24,7 @@ Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
 Requires-Dist: pasqal-cloud
-Requires-Dist: pyqtorch==1.7.
+Requires-Dist: pyqtorch==1.7.7
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -44,7 +44,7 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.
+Requires-Dist: horqrux==0.9.0; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
@@ -225,5 +225,15 @@ doi = {10.1109/MS.2025.3536607}
 }
 ```

+If you use the approximate Generalized parameter shift rule for your publication, we kindly ask you to cite:
+```latex
+@misc{2505.18090,
+    Author = {Vytautas Abramavicius and Evan Philip and Kaonan Micadei and Charles Moussa and Mario Dagrada and Vincent E. Elfving and Panagiotis Barkoutsos and Roland Guichard},
+    Title = {Evaluation of derivatives using approximate generalized parameter shift rule},
+    Year = {2025},
+    Eprint = {arXiv:2505.18090},
+}
+```
+
 ## License
 Qadence is a free and open source software package, released under the PASQAL OPEN-SOURCE SOFTWARE LICENSE (MIT-derived).
````
{qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/RECORD
CHANGED

```diff
@@ -18,7 +18,7 @@ qadence/qubit_support.py,sha256=Nkn1Q01RVViTcggSIom7EFKdWpAuM4TMGwBZ5feCUxA,2120
 qadence/register.py,sha256=MlI1-L1P_e7ugjelhH-1YdxrfPsgmLmX5m-dueawuWQ,13172
 qadence/serial_expr_grammar.peg,sha256=z5ytL7do9kO8o4h-V5GrsDuLdso0KsRcMuIYURFfmAY,328
 qadence/serialization.py,sha256=IB0OgYhtV3F9AmMMMbGcfgNil9vBzs92j5G3yj4KPhg,15616
-qadence/states.py,sha256=
+qadence/states.py,sha256=GosmwMN0oK9N3hNnSgfb8qDsOGVEHZqOSNERvGgscpw,20408
 qadence/types.py,sha256=HtOKf6xi-kTtncqctRWK0Wpxut7KEXHdqoQVqfx0vxo,11927
 qadence/utils.py,sha256=fChJDz7OelWNGLPjoBBcmleWGluWhR36Mf0LnqCx8FA,12376
 qadence/analog/__init__.py,sha256=BCyS9R4KUjzUXN0Ax3b0eMo8ZAuSkGoJQVtZ4_pvAFs,279
@@ -111,8 +111,8 @@ qadence/ml_tools/optimize_step.py,sha256=21m2Wxmxkj_kMHQnKygOWqFdcO-wi5CnMnIZTGE
 qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
 qadence/ml_tools/qcnn_model.py,sha256=2ua_SuaXC9nJKtBnMCKkU3b_gMwRijIeBPj16YsfN2I,5369
 qadence/ml_tools/stages.py,sha256=qW2phMIvQBLM3tn2UoGN-ePiBnZoNq5k844eHVnnn8Y,1407
-qadence/ml_tools/tensors.py,sha256=
-qadence/ml_tools/trainer.py,sha256=
+qadence/ml_tools/tensors.py,sha256=l1j33DRFj4i06Fq5DhwHUEQjbmAfEwDjzpMu-FQub1E,1708
+qadence/ml_tools/trainer.py,sha256=wUOTCu2FjzONFphRE0Bp0qNwcBdmcxzEQDPhJXGE254,35103
 qadence/ml_tools/utils.py,sha256=PW8FyoV0mG_DtN1U8njTDV5qxZ0EK4mnFwMAsLBArfk,1410
 qadence/ml_tools/callbacks/__init__.py,sha256=pTdfjulDGNKca--9BgrdmMyvJSah_0spp929Th6RzC8,913
 qadence/ml_tools/callbacks/callback.py,sha256=JVY1BtPItCx11oAa1-3wICZyDfDLFdc5pmjTbfASHqA,29929
@@ -146,7 +146,7 @@ qadence/transpile/flatten.py,sha256=k4HAfVzvDV40HyfaukiEHyJtAtvFRIcyDbAWiCL8tf0,
 qadence/transpile/invert.py,sha256=IeyidgBwECCKB0i7Ym0KkLyfcx42LyT2mbqkfbK1H8M,4843
 qadence/transpile/noise.py,sha256=LDcDJtQGkgUPkL2t69gg6AScTb-p3J3SxCDZbYOu1L8,1668
 qadence/transpile/transpile.py,sha256=xnzkHA6Qdb-Y5Fv9Latrolrpw44N6_OKc7_QGt70f0I,2713
-qadence-1.11.
-qadence-1.11.
-qadence-1.11.
-qadence-1.11.
+qadence-1.11.5.dist-info/METADATA,sha256=fWleO5GRnMifx1ETu9KB86VBJgLfHksX66aD-R9DOQ0,11202
+qadence-1.11.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+qadence-1.11.5.dist-info/licenses/LICENSE,sha256=IfA3wQpmMOjCnDZ0P8Od2Bxb39rND9s5zfGHp1vMTbQ,2359
+qadence-1.11.5.dist-info/RECORD,,
```
{qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/WHEEL
File without changes

{qadence-1.11.3.dist-info → qadence-1.11.5.dist-info}/licenses/LICENSE
File without changes