qadence 1.10.1__py3-none-any.whl → 1.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/backends/horqrux/convert_ops.py +1 -1
- qadence/blocks/block_to_tensor.py +8 -8
- qadence/blocks/matrix.py +4 -0
- qadence/ml_tools/__init__.py +1 -0
- qadence/ml_tools/config.py +23 -4
- qadence/ml_tools/constructors.py +56 -1
- qadence/ml_tools/information/__init__.py +3 -0
- qadence/ml_tools/information/information_content.py +339 -0
- qadence/ml_tools/trainer.py +106 -1
- qadence/operations/primitive.py +0 -4
- qadence/states.py +27 -5
- qadence/types.py +2 -0
- qadence/utils.py +0 -2
- {qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/METADATA +9 -6
- {qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/RECORD +17 -15
- {qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/WHEEL +0 -0
- {qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/licenses/LICENSE +0 -0
qadence/backends/horqrux/convert_ops.py
CHANGED
@@ -245,7 +245,7 @@ class HorqHamiltonianEvolution(NativeHorqHEvo):

         self._time_evolution = lambda values: values[self.param_names[0]]

-    def
+    def _unitary(self, values: dict[str, Array]) -> Array:
         """The evolved operator given current parameter values for generator and time evolution."""
         return expm(self._hamiltonian(self, values) * (-1j * self._time_evolution(values)))

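For context, the restored `_unitary` returns the evolved operator U(t) = exp(-i·t·H) for the current parameter values. A minimal standalone sketch of the same computation in plain JAX (the toy Hamiltonian and time below are invented for illustration; only `jax.scipy.linalg.expm` is a real API):

```python
import jax.numpy as jnp
from jax.scipy.linalg import expm

# Toy single-qubit Hamiltonian (Pauli-Z) and evolution time, illustration only.
H = jnp.array([[1.0, 0.0], [0.0, -1.0]], dtype=jnp.complex64)
t = 0.5

# What _unitary computes: U(t) = exp(-i * t * H).
U = expm(-1j * t * H)
print(jnp.allclose(U.conj().T @ U, jnp.eye(2), atol=1e-5))  # True: U is unitary
```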
qadence/blocks/block_to_tensor.py
CHANGED
@@ -79,6 +79,7 @@ def _fill_identities(
         torch.Tensor: augmented matrix with dimensions (2**nqubits, 2**nqubits)
         or a tensor (2**n_qubits) if diag_only
     """
+    full_qubit_support = tuple(sorted(full_qubit_support))
     qubit_support = tuple(sorted(qubit_support))
     block_mat = block_mat.to(device)
     mat = IMAT.to(device) if qubit_support[0] != full_qubit_support[0] else block_mat
@@ -469,14 +470,13 @@ def _block_to_tensor_embedded(
         )

     elif isinstance(block, MatrixBlock):
-        mat =
-
-
-
-
-
-
-        # )
+        mat = _fill_identities(
+            block.matrix.unsqueeze(0),
+            block.qubit_support,
+            qubit_support,
+            endianness=endianness,
+            device=device,
+        )

     elif isinstance(block, SWAP):
         swap_block = _swap_block(block)
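The fix sorts `full_qubit_support` before embedding and routes `MatrixBlock` through `_fill_identities`, padding unused qubits with identities. A hedged sketch of how the public `block_to_tensor` entry point exercises this path (the `qubit_support` keyword is assumed from the `_block_to_tensor_embedded` signature in this diff; the batched output shape is an assumption as well):

```python
import torch
from qadence import block_to_tensor
from qadence.blocks import MatrixBlock

# Pauli-X supplied as a raw matrix, placed on qubit 1 of a 2-qubit register.
xmat = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble)
block = MatrixBlock(xmat, qubit_support=(1,))

# Embedding fills qubit 0 with an identity; support ordering is now normalized.
full = block_to_tensor(block, qubit_support=(0, 1))
print(full.shape)  # expected: torch.Size([1, 4, 4]) (batched)
```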
qadence/blocks/matrix.py
CHANGED
@@ -7,6 +7,8 @@ import numpy as np
 import torch
 from torch.linalg import eigvals

+from math import log
+
 from qadence.blocks import PrimitiveBlock
 from qadence.noise import NoiseHandler

@@ -84,6 +86,8 @@ class MatrixBlock(PrimitiveBlock):
         if not self.is_unitary(matrix):
             logger.warning("Provided matrix is not unitary.")
         self.matrix = matrix.clone()
+        if int(log(self.matrix.size(1), 2)) != len(qubit_support):
+            raise ValueError("Provided matrix does not match the qubit_support length.")
         super().__init__(qubit_support, noise)

     @cached_property
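A hedged sketch of the new validation: the matrix dimension must satisfy log2(dim) == len(qubit_support), otherwise construction now raises instead of silently producing a mis-sized block:

```python
import torch
from qadence.blocks import MatrixBlock

xmat = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble)  # 2x2, i.e. one qubit

MatrixBlock(xmat, qubit_support=(0,))  # fine: int(log(2, 2)) == 1 == len((0,))

try:
    MatrixBlock(xmat, qubit_support=(0, 1))  # mismatch: 1 != 2
except ValueError as err:
    print(err)  # Provided matrix does not match the qubit_support length.
```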
qadence/ml_tools/__init__.py
CHANGED
@@ -4,6 +4,7 @@ from .callbacks.saveload import load_checkpoint, load_model, write_checkpoint
 from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
+from .information import InformationContent
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
qadence/ml_tools/config.py
CHANGED
@@ -434,15 +434,17 @@ class AnsatzConfig:
     """What type of ansatz.

     `AnsatzType.HEA` for Hardware Efficient Ansatz.
-    `AnsatzType.IIA` for Identity
+    `AnsatzType.IIA` for Identity Initialized Ansatz.
+    `AnsatzType.ALA` for Alternating Layer Ansatz.
     """

     ansatz_strategy: Strategy = Strategy.DIGITAL
     """Ansatz strategy.

-    `Strategy.DIGITAL` for fully digital ansatz. Required if `ansatz_type` is `AnsatzType.
-    `Strategy.SDAQC` for analog entangling block.
-    `
+    `Strategy.DIGITAL` for fully digital ansatz. Required if `ansatz_type` is `AnsatzType.ALA`.
+    `Strategy.SDAQC` for analog entangling block. Only available for `AnsatzType.HEA` or
+    `AnsatzType.ALA`.
+    `Strategy.RYDBERG` for fully Rydberg HEA ansatz. Only available for `AnsatzType.HEA`.
     """

     strategy_args: dict = field(default_factory=dict)
@@ -484,6 +486,13 @@ class AnsatzConfig:
     """
     # The default for a dataclass can not be a mutable object without using this default_factory.

+    m_block_qubits: int | None = None
+    """
+    The number of qubits in the local entangling block of an Alternating Layer Ansatz (ALA).
+
+    Only used when `ansatz_type` is `AnsatzType.ALA`.
+    """
+
     param_prefix: str = "theta"
     """The base bame of the variational parameter."""

@@ -499,3 +508,13 @@ class AnsatzConfig:
         assert (
             self.ansatz_strategy != Strategy.RYDBERG
         ), "Rydberg strategy not allowed for Identity-initialized ansatz."
+
+        if self.ansatz_type == AnsatzType.ALA:
+            assert (
+                self.ansatz_strategy == Strategy.DIGITAL
+            ), f"{self.ansatz_strategy} not allowed for Alternating Layer Ansatz.\
+                Only `Strategy.DIGITAL` allowed."
+
+            assert (
+                self.m_block_qubits is not None
+            ), "m_block_qubits must be specified for Alternating Layer Ansatz."
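Putting the new fields together, a hedged configuration sketch for the ALA. Field names follow the diff; `depth` is the pre-existing `AnsatzConfig` field, and the import paths are assumptions based on the existing qadence layout:

```python
from qadence.ml_tools import AnsatzConfig
from qadence.types import AnsatzType, Strategy

# ALA must be digital and must specify the local block size,
# per the __post_init__ asserts added above.
config = AnsatzConfig(
    depth=2,
    ansatz_type=AnsatzType.ALA,
    ansatz_strategy=Strategy.DIGITAL,
    m_block_qubits=2,  # qubits per local entangling block
)
```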
qadence/ml_tools/constructors.py
CHANGED
@@ -13,13 +13,14 @@ from qadence.constructors import (
     analog_feature_map,
     feature_map,
     hamiltonian_factory,
-    iia,
     rydberg_feature_map,
     rydberg_hea,
     rydberg_tower_feature_map,
 )
+from qadence.constructors.ala import ala_digital
 from qadence.constructors.hamiltonians import ObservableConfig, TDetuning
 from qadence.constructors.hea import hea_digital, hea_sDAQC
+from qadence.constructors.iia import iia
 from qadence.measurements import Measurements
 from qadence.noise import NoiseHandler
 from qadence.operations import CNOT, RX, RY, I, N, Z
@@ -596,6 +597,58 @@ def _create_hea(
     )


+def _create_ala_digital(
+    num_qubits: int,
+    config: AnsatzConfig,
+) -> AbstractBlock:
+    """
+    Create the Digital Alternating Layer Ansatz based on the configuration.
+
+    Args:
+        num_qubits (int): The number of qubits.
+        config (AnsatzConfig): The configuration for the ansatz.
+
+    Returns:
+        AbstractBlock: The Digital Alternating Layer Ansatz.
+    """
+    operations = config.strategy_args.get("operation", [RX, RY, RX])
+    entangler = config.strategy_args.get("entangler", CNOT)
+
+    return ala_digital(
+        n_qubits=num_qubits,
+        m_block_qubits=config.m_block_qubits,  # type: ignore[arg-type]
+        param_prefix=config.param_prefix,
+        operations=operations,
+        entangler=entangler,
+    )
+
+
+def _create_ala(
+    num_qubits: int,
+    config: AnsatzConfig,
+) -> AbstractBlock:
+    """
+    Create the Alternating Layer Ansatz based on the configuration.
+
+    Args:
+        num_qubits (int): The number of qubits.
+        config (AnsatzConfig): The configuration for the ansatz.
+
+    Returns:
+        AbstractBlock: The Alternating Layer Ansatz.
+
+    Raises:
+        ValueError: If the ansatz strategy is not `Strategy.DIGITAL`.
+    """
+    if config.ansatz_strategy == Strategy.DIGITAL:
+        return _create_ala_digital(num_qubits=num_qubits, config=config)
+    else:
+        raise ValueError(
+            f"Invalid ansatz strategy {config.ansatz_strategy} provided. Only `Strategy.DIGITAL` \
+            allowed"
+        )
+
+
 def create_ansatz(
     register: int | Register,
     config: AnsatzConfig,
@@ -619,6 +672,8 @@ def create_ansatz(
         return _create_iia(num_qubits=num_qubits, config=config)
     elif config.ansatz_type == AnsatzType.HEA:
         return _create_hea(register=register, config=config)
+    elif config.ansatz_type == AnsatzType.ALA:
+        return _create_ala(num_qubits=num_qubits, config=config)
     else:
         raise NotImplementedError(
             f"Ansatz of type {config.ansatz_type} not implemented yet. Only `AnsatzType.HEA` and\
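A hedged end-to-end sketch through the new dispatch branch (`create_ansatz` is re-exported from `qadence.ml_tools` per the `__init__.py` change above; the specific values are illustrative):

```python
from qadence.ml_tools import AnsatzConfig, create_ansatz
from qadence.types import AnsatzType

config = AnsatzConfig(depth=2, ansatz_type=AnsatzType.ALA, m_block_qubits=2)

# Dispatches to _create_ala -> _create_ala_digital -> ala_digital.
ansatz = create_ansatz(register=4, config=config)
print(type(ansatz))  # an AbstractBlock subclass
```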
qadence/ml_tools/information/information_content.py
ADDED
@@ -0,0 +1,339 @@
+from __future__ import annotations
+
+import functools
+from logging import getLogger
+from math import log, sqrt
+from statistics import NormalDist
+from typing import Any, Callable
+
+import torch
+from torch import nn
+from torch.func import functional_call  # type: ignore
+
+logger = getLogger("ml_tools")
+
+
+class InformationContent:
+    def __init__(
+        self,
+        model: nn.Module,
+        loss_fn: Callable,
+        xs: Any,
+        epsilons: torch.Tensor,
+        variation_multiple: int = 20,
+    ) -> None:
+        """Information Landscape class.
+
+        This class handles the study of loss landscape from information theoretic
+        perspective and provides methods to get bounds on the norm of the
+        gradient from the Information Content of the loss landscape.
+
+        Args:
+            model: The quantum or classical model to analyze.
+            loss_fn: Loss function that takes model output and calculates loss
+            xs: Input data to evaluate the model on
+            epsilons: The thresholds to use for discretization of the finite derivatives
+            variation_multiple: The number of sets of variational parameters to generate per each
+                variational parameter. The number of variational parameters required for the
+                statistical analysis scales linearly with the amount of them present in the
+                model. This is that linear factor.
+
+        Notes:
+            This class provides flexibility in terms of what the model, the loss function,
+            and the xs are. The only requirement is that the loss_fn takes the model and xs as
+            arguments and returns the loss, and another dictionary of other metrics.
+
+            Thus, assumed structure:
+            loss_fn(model, xs) -> (loss, metrics, ...)
+
+            Example: A Classifier
+            ```python
+            model = nn.Linear(10, 1)
+
+            def loss_fn(
+                model: nn.Module,
+                xs: tuple[torch.Tensor, torch.Tensor]
+            ) -> tuple[torch.Tensor, dict[str, float]]:
+                criterion = nn.MSELoss()
+                inputs, labels = xs
+                outputs = model(inputs)
+                loss = criterion(outputs, labels)
+                metrics = {"loss": loss.item()}
+                return loss, metrics
+
+            xs = (torch.randn(10, 10), torch.randn(10, 1))
+
+            info_landscape = InfoLandscape(model, loss_fn, xs)
+            ```
+            In this example, the model is a linear classifier, and the `xs` include both the
+            inputs and the target labels. The logic for calculation of the loss from this lies
+            entirely within the `loss_fn` function. This can then further be used to obtain the
+            bounds on the average norm of the gradient of the loss function.
+
+            Example: A Physics Informed Neural Network
+            ```python
+            class PhysicsInformedNN(nn.Module):
+                // <Initialization Logic>
+
+                def forward(self, xs: dict[str, torch.Tensor]):
+                    return {
+                        "pde_residual": pde_residual(xs["pde"]),
+                        "boundary_condition": bc_term(xs["bc"]),
+                    }
+
+            def loss_fn(
+                model: PhysicsInformedNN,
+                xs: dict[str, torch.Tensor]
+            ) -> tuple[torch.Tensor, dict[str, float]]:
+                pde_residual, bc_term = model(xs)
+                loss = torch.mean(torch.sum(pde_residual**2, dim=1), dim=0)
+                    + torch.mean(torch.sum(bc_term**2, dim=1), dim=0)
+
+                return loss, {"pde_residual": pde_residual, "bc_term": bc_term}
+
+            xs = {
+                "pde": torch.linspace(0, 1, 10),
+                "bc": torch.tensor([0.0]),
+            }
+
+            info_landscape = InfoLandscape(model, loss_fn, xs)
+            ```
+
+            In this example, the model is a Physics Informed Neural Network, and the `xs`
+            are the inputs to the different residual components of the model. The logic
+            for calculation of the residuals lies within the PhysicsInformedNN class, and
+            the loss function is defined to calculate the loss that is to be optimized
+            from these residuals. This can then further be used to obtain the
+            bounds on the average norm of the gradient of the loss function.
+
+            The first value that the `loss_fn` returns is the loss value that is being optimized.
+            The function is also expected to return other value(s), often the metrics that are
+            used to calculate the loss. These values are ignored for the purpose of this class.
+        """
+        self.model = model
+        self.loss_fn = loss_fn
+        self.xs = xs
+        self.epsilons = epsilons
+        self.device = next(model.parameters()).device
+
+        self.param_shapes = {}
+        self.total_params = 0
+
+        for name, param in model.named_parameters():
+            self.param_shapes[name] = param.shape
+            self.total_params += param.numel()
+        self.n_variations = variation_multiple * self.total_params
+        self.all_variations = torch.empty(
+            (self.n_variations, self.total_params), device=self.device
+        ).uniform_(0, 2 * torch.pi)
+
+    def reshape_param_variations(self) -> dict[str, torch.Tensor]:
+        """Reshape variations of the model's variational parameters.
+
+        Returns:
+            Dictionary of parameter tensors, each with shape [n_variations, *param_shape]
+        """
+        param_variations = {}
+        start_idx = 0
+
+        for name, shape in self.param_shapes.items():
+            param_size = torch.prod(torch.tensor(shape)).item()
+            param_variations[name] = self.all_variations[
+                :, start_idx : start_idx + param_size
+            ].view(self.n_variations, *shape)
+            start_idx += param_size
+
+        return param_variations
+
+    def batched_loss(self) -> torch.Tensor:
+        """Calculate loss for all parameter variations in a batched manner.
+
+        Returns: Tensor of loss values for each parameter variation
+        """
+        param_variations = self.reshape_param_variations()
+        losses = torch.zeros(self.n_variations, device=self.device)
+
+        for i in range(self.n_variations):
+            params = {name: param[i] for name, param in param_variations.items()}
+            current_model = lambda x: functional_call(self.model, params, (x,))
+            losses[i] = self.loss_fn(current_model, self.xs)[0]
+
+        return losses
+
+    def randomized_finite_der(self) -> torch.Tensor:
+        """
+        Calculate normalized finite difference of loss on doing random walk in the parameter space.
+
+        This serves as a proxy for the derivative of the loss with respect to parameters.
+
+        Returns:
+            Tensor containing normalized finite differences (approximate directional derivatives)
+            between consecutive points in the random walk. Shape: [n_variations - 1]
+        """
+        losses = self.batched_loss()
+
+        return (losses[1:] - losses[:-1]) / (
+            torch.norm(self.all_variations[1:] - self.all_variations[:-1], dim=1) + 1e-8
+        )
+
+    def discretize_derivatives(self) -> torch.Tensor:
+        """
+        Convert finite derivatives into discrete values.
+
+        Returns:
+            Tensor containing discretized derivatives with shape [n_epsilons, n_variations-2]
+            Each row contains {-1, 0, 1} values for that epsilon
+        """
+        derivatives = self.randomized_finite_der()
+
+        derivatives = derivatives.unsqueeze(0)
+        epsilons = self.epsilons.unsqueeze(1)
+
+        discretized = torch.zeros((len(epsilons), len(derivatives[0])), device=self.device)
+        discretized[derivatives > epsilons] = 1
+        discretized[derivatives < -epsilons] = -1
+
+        return discretized
+
+    def calculate_transition_probabilities_batch(self) -> torch.Tensor:
+        """
+        Calculate transition probabilities for multiple epsilon values.
+
+        Returns:
+            Tensor of shape [n_epsilons, 6] containing probabilities for each transition type
+            Columns order: [+1to0, +1to-1, 0to+1, 0to-1, -1to0, -1to+1]
+        """
+        discretized = self.discretize_derivatives()
+
+        current = discretized[:, :-1]
+        next_val = discretized[:, 1:]
+
+        transitions = torch.stack(
+            [
+                ((current == 1) & (next_val == 0)).sum(dim=1),
+                ((current == 1) & (next_val == -1)).sum(dim=1),
+                ((current == 0) & (next_val == 1)).sum(dim=1),
+                ((current == 0) & (next_val == -1)).sum(dim=1),
+                ((current == -1) & (next_val == 0)).sum(dim=1),
+                ((current == -1) & (next_val == 1)).sum(dim=1),
+            ],
+            dim=1,
+        ).float()
+
+        total_transitions = current.size(1)
+        probabilities = transitions / total_transitions
+
+        return probabilities
+
+    @functools.cached_property
+    def calculate_IC(self) -> torch.Tensor:
+        """
+        Calculate Information Content for multiple epsilon values.
+
+        Returns: Tensor of IC values for each epsilon [n_epsilons]
+        """
+        probs = self.calculate_transition_probabilities_batch()
+
+        mask = probs > 1e-4
+
+        ic_terms = torch.where(mask, -probs * torch.log(probs), torch.zeros_like(probs))
+        ic_values = ic_terms.sum(dim=1) / torch.log(torch.tensor(6.0))
+
+        return ic_values
+
+    def max_IC(self) -> tuple[float, float]:
+        """
+        Get the maximum Information Content and its corresponding epsilon.
+
+        Returns: Tuple of (maximum IC value, optimal epsilon)
+        """
+        max_ic, max_idx = torch.max(self.calculate_IC, dim=0)
+        max_epsilon = self.epsilons[max_idx]
+        return max_ic.item(), max_epsilon.item()
+
+    def sensitivity_IC(self, eta: float) -> float:
+        """
+        Find the minimum value of epsilon such that the information content is less than eta.
+
+        Args:
+            eta: Threshold value, the sensitivity IC.
+
+        Returns: The epsilon value that gives IC that is less than the sensitivity IC.
+        """
+        ic_values = self.calculate_IC
+        mask = ic_values < eta
+        epsilons = self.epsilons[mask]
+        return float(epsilons.min().item())
+
+    @staticmethod
+    @functools.lru_cache
+    def q_value(H_value: float) -> float:
+        """
+        Compute the q value.
+
+        q is the solution to the equation:
+            H(x) = 4h(x) + 2h(1/2 - 2x)
+
+        It is the value of the probability of 4 of the 6 transitions such that
+        the IC is the same as the IC of our system.
+
+        This quantity is useful in calculating the bounds on the norms of the gradients.
+
+        Args:
+            H_value (float): The information content.
+
+        Returns:
+            float: The q value
+        """
+
+        x = torch.linspace(0.001, 0.16667, 10000)
+
+        H = -4 * x * torch.log(x) / torch.log(torch.tensor(6)) - 2 * (0.5 - 2 * x) * torch.log(
+            0.5 - 2 * x
+        ) / torch.log(torch.tensor(6))
+        err = torch.abs(H - H_value)
+        idx = torch.argmin(err)
+        return float(x[idx].item())
+
+    def get_grad_norm_bounds_max_IC(self) -> tuple[float, float]:
+        """
+        Compute the bounds on the average norm of the gradient.
+
+        Returns:
+            tuple[float, float]: The lower and upper bounds.
+        """
+        max_IC, epsilon_m = self.max_IC()
+        lower_bound = (
+            epsilon_m
+            * sqrt(self.total_params)
+            / (NormalDist().inv_cdf(1 - 2 * self.q_value(max_IC)))
+        )
+        upper_bound = (
+            epsilon_m
+            * sqrt(self.total_params)
+            / (NormalDist().inv_cdf(0.5 * (1 + 2 * self.q_value(max_IC))))
+        )
+
+        if max_IC < log(2, 6):
+            logger.warning(
+                "Warning: The maximum IC is less than the required value. The bounds may be"
+                + " inaccurate."
+            )
+
+        return lower_bound, upper_bound
+
+    def get_grad_norm_bounds_sensitivity_IC(self, eta: float) -> float:
+        """
+        Compute the bounds on the average norm of the gradient.
+
+        Args:
+            eta (float): The sensitivity IC.
+
+        Returns:
+            float: The upper bound.
+        """
+        epsilon_sensitivity = self.sensitivity_IC(eta)
+        upper_bound = (
+            epsilon_sensitivity * sqrt(self.total_params) / (NormalDist().inv_cdf(1 - 3 * eta / 2))
+        )
+        return upper_bound
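A hedged, self-contained sketch of the new class on a toy torch model. The `loss_fn` contract follows the docstring above; the model, data, and epsilon grid are invented for illustration:

```python
import torch
from torch import nn
from qadence.ml_tools import InformationContent

model = nn.Linear(4, 1)

def loss_fn(model, xs):
    # Receives either the module or a functional wrapper of it; both are callable.
    inputs, labels = xs
    loss = nn.functional.mse_loss(model(inputs), labels)
    return loss, {}  # (loss, metrics, ...)

xs = (torch.randn(16, 4), torch.randn(16, 1))
epsilons = torch.logspace(-3, 1, 20)  # discretization thresholds

ic = InformationContent(model, loss_fn, xs, epsilons)
max_ic, eps_m = ic.max_IC()
lower, upper = ic.get_grad_norm_bounds_max_IC()
print(f"max IC {max_ic:.3f} at epsilon {eps_m:.2e}; bounds [{lower:.3f}, {upper:.3f}]")
```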
qadence/ml_tools/trainer.py
CHANGED
@@ -14,7 +14,8 @@ from torch import dtype as torch_dtype
 from torch.utils.data import DataLoader

 from qadence.ml_tools.config import TrainConfig
-from qadence.ml_tools.data import DictDataLoader, OptimizeResult
+from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
+from qadence.ml_tools.information import InformationContent
 from qadence.ml_tools.optimize_step import optimize_step, update_ng_parameters
 from qadence.ml_tools.stages import TrainingStage

@@ -711,3 +712,107 @@ class Trainer(BaseTrainer):
         self.opt_result = OptimizeResult(
             self.current_epoch, self.model_old, self.optimizer_old, loss, metrics
         )
+
+    def get_ic_grad_bounds(
+        self,
+        eta: float,
+        epsilons: torch.Tensor,
+        variation_multiple: int = 20,
+        dataloader: DataLoader | DictDataLoader | None = None,
+    ) -> tuple[float, float, float]:
+        """
+        Calculate the bounds on the gradient norm of the loss using Information Content.
+
+        Args:
+            eta (float): The sensitivity IC.
+            epsilons (torch.Tensor): The epsilons to use as thresholds for discretization of the
+                finite derivatives.
+            variation_multiple (int): The number of sets of variational parameters to generate per
+                each variational parameter. The number of variational parameters required for the
+                statistical analysis scales linearly with the amount of them present in the
+                model. This is that linear factor.
+            dataloader (DataLoader | DictDataLoader | None): The dataloader for training data. A
+                new dataloader can be provided, or the dataloader provided in the trainer will be
+                used. In case no dataloader is provided in either place, it assumes that the
+                model does not require any input data.
+
+        Returns:
+            tuple[float, float, float]: The max IC lower bound, max IC upper bound, and sensitivity
+                IC upper bound.
+
+        Examples:
+            ```python
+            import torch
+            from torch.optim.adam import Adam
+
+            from qadence.constructors import ObservableConfig
+            from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig, TrainConfig
+            from qadence.ml_tools.data import to_dataloader
+            from qadence.ml_tools.models import QNN
+            from qadence.ml_tools.optimize_step import optimize_step
+            from qadence.ml_tools.trainer import Trainer
+            from qadence.operations.primitive import Z
+
+            fm_config = FeatureMapConfig(num_features=1)
+            ansatz_config = AnsatzConfig(depth=4)
+            obs_config = ObservableConfig(detuning=Z)
+
+            qnn = QNN.from_configs(
+                register=4,
+                obs_config=obs_config,
+                fm_config=fm_config,
+                ansatz_config=ansatz_config,
+            )
+
+            optimizer = Adam(qnn.parameters(), lr=0.001)
+
+            batch_size = 25
+            x = torch.linspace(0, 1, 32).reshape(-1, 1)
+            y = torch.sin(x)
+            train_loader = to_dataloader(x, y, batch_size=batch_size, infinite=True)
+
+            train_config = TrainConfig(max_iter=100)
+
+            trainer = Trainer(
+                model=qnn,
+                optimizer=optimizer,
+                config=train_config,
+                loss_fn="mse",
+                train_dataloader=train_loader,
+                optimize_step=optimize_step,
+            )
+
+            # Perform exploratory landscape analysis with Information Content
+            ic_sensitivity_threshold = 1e-4
+            epsilons = torch.logspace(-2, 2, 10)
+
+            max_ic_lower_bound, max_ic_upper_bound, sensitivity_ic_upper_bound = (
+                trainer.get_ic_grad_bounds(
+                    eta=ic_sensitivity_threshold,
+                    epsilons=epsilons,
+                )
+            )
+
+            # Resume training as usual...
+
+            trainer.fit(train_loader)
+            ```
+        """
+        if not self._use_grad:
+            logger.warning(
+                "Gradient norm bounds are only relevant when using a gradient based optimizer. \
+                Currently the trainer is set to use a gradient-free optimizer."
+            )
+
+        dataloader = dataloader if dataloader is not None else self.train_dataloader
+
+        batch = next(iter(self._batch_iter(dataloader, num_batches=1)))
+
+        xs = data_to_device(batch, device=self.device, dtype=self.data_dtype)
+
+        ic = InformationContent(self.model, self.loss_fn, xs, epsilons)
+
+        max_ic_lower_bound, max_ic_upper_bound = ic.get_grad_norm_bounds_max_IC()
+        sensitivity_ic_upper_bound = ic.get_grad_norm_bounds_sensitivity_IC(eta)
+
+        return max_ic_lower_bound, max_ic_upper_bound, sensitivity_ic_upper_bound
qadence/operations/primitive.py
CHANGED
@@ -376,10 +376,6 @@ class SWAP(PrimitiveBlock):
     def eigenvalues(self) -> Tensor:
         return torch.tensor([-1, 1, 1, 1], dtype=cdouble)

-    @property
-    def n_qubits(self) -> int:
-        return 2
-
     @property
     def _block_title(self) -> str:
         c, t = self.qubit_support
qadence/states.py
CHANGED
@@ -40,6 +40,7 @@ __all__ = [
     "equivalent_state",
     "DensityMatrix",
     "density_mat",
+    "overlap",
 ]

 ATOL_64 = 1e-14  # 64 bit precision
@@ -559,11 +560,32 @@ def rand_bitstring(N: int) -> str:
     return "".join(str(random.randint(0, 1)) for _ in range(N))


+def overlap(s0: torch.Tensor, s1: torch.Tensor) -> torch.Tensor:
+    """
+    Computes the exact overlap between two statevectors.
+
+    Arguments:
+        s0 (torch.Tensor): A statevector or batch of statevectors.
+        s1 (torch.Tensor): A statevector or batch of statevectors.
+
+    Returns:
+        A torch.Tensor with the result.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import rand_bitstring
+
+    print(rand_bitstring(N=8))
+    ```
+    """
+    from qadence.overlap import overlap_exact
+
+    return overlap_exact(s0, s1)
+
+
 def equivalent_state(
     s0: torch.Tensor, s1: torch.Tensor, rtol: float = 0.0, atol: float = NORMALIZATION_ATOL
 ) -> bool:
-
-
-
-    expected = torch.ones_like(fid)
-    return torch.allclose(fid, expected, rtol=rtol, atol=atol)  # type: ignore[no-any-return]
+    fidelity = overlap(s0, s1)
+    expected = torch.ones_like(fidelity)
+    return torch.allclose(fidelity, expected, rtol=rtol, atol=atol)  # type: ignore[no-any-return]
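Note that the docstring example shipped with `overlap` demonstrates `rand_bitstring` rather than `overlap` itself. A hedged usage sketch of the new export and the refactored `equivalent_state` (`zero_state` and `random_state` are pre-existing helpers in `qadence.states`):

```python
from qadence.states import zero_state, random_state, equivalent_state, overlap

s0 = zero_state(2)
s1 = zero_state(2)
print(overlap(s0, s1))           # ~1.0: identical states
print(equivalent_state(s0, s1))  # True, via fidelity = overlap(s0, s1)

s2 = random_state(2)
print(overlap(s0, s2))           # fidelity in [0, 1]
```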
qadence/types.py
CHANGED
qadence/utils.py
CHANGED
{qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/METADATA
CHANGED
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.10.1
+Version: 1.10.3
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -43,7 +43,7 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.
+Requires-Dist: horqrux==0.7.0; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
@@ -56,8 +56,8 @@ Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
 Requires-Dist: pasqal-cloud==0.12.7; extra == 'pulser'
-Requires-Dist: pulser-core==1.2.
-Requires-Dist: pulser-simulation==1.2.
+Requires-Dist: pulser-core==1.2.2; extra == 'pulser'
+Requires-Dist: pulser-simulation==1.2.2; extra == 'pulser'
 Provides-Extra: visualization
 Requires-Dist: graphviz; extra == 'visualization'
 Description-Content-Type: text/markdown
@@ -75,7 +75,10 @@ programs** with tunable qubit interactions and arbitrary register topologies rea

 **For a high-level overview of Qadence features, [check out our white paper](https://arxiv.org/abs/2401.09915).**

-**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/)
+**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/).
+
+**For any questions or comments, [feel free to start a discussion](https://github.com/pasqal-io/qadence/discussions).
+**

 [](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml)
 [](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml)
{qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/RECORD
CHANGED
@@ -18,9 +18,9 @@ qadence/qubit_support.py,sha256=Nkn1Q01RVViTcggSIom7EFKdWpAuM4TMGwBZ5feCUxA,2120
 qadence/register.py,sha256=mwmvS6PcTY0F9cIhTUXG3NT73FIagfMCwVqYa4DrQrk,13001
 qadence/serial_expr_grammar.peg,sha256=z5ytL7do9kO8o4h-V5GrsDuLdso0KsRcMuIYURFfmAY,328
 qadence/serialization.py,sha256=qEET6Gu9u2aSibPve3bJrqDzK2_gO3RPDJjt4ZY8GbE,15596
-qadence/states.py,sha256=
-qadence/types.py,sha256=
-qadence/utils.py,sha256=
+qadence/states.py,sha256=Aj28aNHGWkZrFw_mKpHrxCA1bDXlkFhw18D70tg0RF0,15953
+qadence/types.py,sha256=fFjSG9JlPqxFizIK4PfjNZ4e13-yGckejq-fP5TEMdc,12051
+qadence/utils.py,sha256=U670ftNhxkD2hejvV1mTM2YnhDgTms_39saZs-7GKl8,9777
 qadence/analog/__init__.py,sha256=BCyS9R4KUjzUXN0Ax3b0eMo8ZAuSkGoJQVtZ4_pvAFs,279
 qadence/analog/addressing.py,sha256=GSt4heEmRkBmoQIgdgkTclEFxZY-jjuAd77_SsZtGdI,6513
 qadence/analog/constants.py,sha256=B2phQoN1ASL8CwM-Dsa1rbraYwGwwPSeiB3HbVe-MPA,1243
@@ -35,7 +35,7 @@ qadence/backends/utils.py,sha256=SSiMxZjaFS8e8sB6ZBLXPKuJNQGl93pRMy9hnI4oDrw,910
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
 qadence/backends/horqrux/backend.py,sha256=KNFFGN9dsgB9QKtNXiP3LyMY9DQ-7W7ScyE6k29fHJY,8842
 qadence/backends/horqrux/config.py,sha256=xz7JlUcwW_4JAbvProbSI9hA1SXZRRAN0Hr2bvmLzfg,892
-qadence/backends/horqrux/convert_ops.py,sha256=
+qadence/backends/horqrux/convert_ops.py,sha256=lUQ3faf3Y4MvIzWzczHLVLs9f9iPij0Dy8JgSHx9ufo,8647
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
 qadence/backends/pulser/backend.py,sha256=cI4IgijPpItNdDmLpKkJFas0X02wMiZd_XmVas41gEI,14846
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
@@ -52,11 +52,11 @@ qadence/backends/pyqtorch/convert_ops.py,sha256=qG26-HmtUDaZO0KDnw2sbT3CRx_poS7e
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
 qadence/blocks/abstract.py,sha256=DSQUE71rMyRBwAP--4Tx1WQC_LCXaNlftjd7goGyrpQ,12027
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
-qadence/blocks/block_to_tensor.py,sha256=
+qadence/blocks/block_to_tensor.py,sha256=CG4KUeBhbfCr6o7JCN6CN-herDUXA8tcC4shi06uUmk,17338
 qadence/blocks/composite.py,sha256=f9D8L3u5Ktu_-xDBWsWiPlY8I-YW5YFgU18BtqwFHK0,8937
 qadence/blocks/embedding.py,sha256=MI-gTPEe1e56AiHJr6MJwMAHdA7ZYmTo0b0VmFfyISQ,7029
 qadence/blocks/manipulate.py,sha256=kPmzej7mnWFoqTJA2CkGulT7hcPha0GGPARC8rjZltg,2387
-qadence/blocks/matrix.py,sha256=
+qadence/blocks/matrix.py,sha256=JgzFLWoWDytaE0MEYe-Di7tbwb4jSmMF8tsOF04RIRo,4214
 qadence/blocks/primitive.py,sha256=GLruKpiFBStWVd_M9mzLr3MqDNPbyaMUzEVB6xV3cPQ,17657
 qadence/blocks/utils.py,sha256=_V43qD7kQNK8JS3gxfpkRn56ZIF_GGrhAnARn1hq2hk,17772
 qadence/constructors/__init__.py,sha256=kFAMJMZbEUQlNZBAJi2XOaPFMh-ynb2_A1lI85la4y0,1027
@@ -101,22 +101,24 @@ qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-
 qadence/mitigations/analog_zne.py,sha256=5n1ffjGM1I5sd9TATsB90pKdDh32UTFJ-ZyOHKdM5z0,7821
 qadence/mitigations/protocols.py,sha256=0TeHvlGTN8_88XNEwrjA97C5BUlrh34wYmx0w6-5Tyw,1622
 qadence/mitigations/readout.py,sha256=nI-voV5N0R7630Cn8t8x9EdV9iB76P0LDkRosy1s0Ec,6631
-qadence/ml_tools/__init__.py,sha256=
-qadence/ml_tools/config.py,sha256=
-qadence/ml_tools/constructors.py,sha256=
+qadence/ml_tools/__init__.py,sha256=AsZyk_i3EKR12m038o4cAdEfRje8RaCNW6CgmyZ9I94,980
+qadence/ml_tools/config.py,sha256=r78n5tHDuMBPnItWP9FYaDPxneTEAtbUJb5yLZBs64A,20163
+qadence/ml_tools/constructors.py,sha256=MT37r2OZ9uqlf0J7jBGNzMlnaZjfhF6rmll0sIWyaAg,29700
 qadence/ml_tools/data.py,sha256=5sAqG9rUtGZPzFlzEDhMjSeOXF8Z0BmszJ_FRzYAy2A,5311
 qadence/ml_tools/models.py,sha256=DKSVFNC-Iq0-AmBrCZ1kqUpTBHQh_pX_1MqYT8eCG08,17045
 qadence/ml_tools/optimize_step.py,sha256=wUnxfWy0c9rEKe41-26On1bPFBwmSYBF4WCGn76oyq8,3376
 qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
 qadence/ml_tools/stages.py,sha256=qW2phMIvQBLM3tn2UoGN-ePiBnZoNq5k844eHVnnn8Y,1407
 qadence/ml_tools/tensors.py,sha256=xZ9ZRzOqEaMgLUGWQf1najDmL6iLuN1ojCGVFs1Tm94,1337
-qadence/ml_tools/trainer.py,sha256=
+qadence/ml_tools/trainer.py,sha256=XOwupRd6lWihNCZCoX3X6S8pzTBqyEvPaWGJ7HYnl98,31317
 qadence/ml_tools/utils.py,sha256=PW8FyoV0mG_DtN1U8njTDV5qxZ0EK4mnFwMAsLBArfk,1410
 qadence/ml_tools/callbacks/__init__.py,sha256=pTdfjulDGNKca--9BgrdmMyvJSah_0spp929Th6RzC8,913
 qadence/ml_tools/callbacks/callback.py,sha256=XoqTS1uLOkbh4FtKpDSXbUA5_LzjOAoVMaa2jYcYB3w,28800
 qadence/ml_tools/callbacks/callbackmanager.py,sha256=HwxgbqJi1GWYg2lgUqEyw9Y6a71YG_m5DmhpaeB6kLs,8007
 qadence/ml_tools/callbacks/saveload.py,sha256=2z8v1A3qIIPZuusEcSNqgYTnKGKkDj71KvY_atJvKnM,6015
 qadence/ml_tools/callbacks/writer_registry.py,sha256=_lPb4VvDHiiRNh2EaEKxOSslnJgBAImGw5SoVReg-Rs,15351
+qadence/ml_tools/information/__init__.py,sha256=ShyaFJtSRmahI8dIRgDlfjp8XobJ23GTd7X3kU-5F34,88
+qadence/ml_tools/information/information_content.py,sha256=Uv6e831Pi4udhnvEBBALqbkScuDfez-mGM5XTdZyo_w,12750
 qadence/ml_tools/loss/__init__.py,sha256=d_0FlisdmgLY0qL1PeaabbcWX1B42RBdm7220cfzSN4,247
 qadence/ml_tools/loss/loss.py,sha256=Bditg8nelMEpG4Yt0aopcAQz84xIc6O-AGUO2M0nqbM,2982
 qadence/ml_tools/train_utils/__init__.py,sha256=1A2FlFg7kn68R1fdRC73S8DzA9gkBW7whdNHjzH5UTA,235
@@ -129,7 +131,7 @@ qadence/operations/analog.py,sha256=v11DSrg-XUbwIAWAWM43y3VQbYKsx2ynx-HimUoC-x0,
 qadence/operations/control_ops.py,sha256=fPSwOxJaVtJNbwri1UdD20W1JXQlB-inPTCJG3Fk4hI,10187
 qadence/operations/ham_evo.py,sha256=brJ11tlwj6UPYkUcnId-BKlzNStsZd0vp9FKHCFTjlM,10642
 qadence/operations/parametric.py,sha256=kV5d-diaQAoRlqKqoo0CGCbPej6eAxHQXniqfFKff3g,5394
-qadence/operations/primitive.py,sha256=
+qadence/operations/primitive.py,sha256=hPJMDgWaEEdSYDZsr__hAcwy-QJEtzbM4qtFDcLmNBg,9881
 qadence/transpile/__init__.py,sha256=JrrQ4Osc4nNRWWjRGmVn57fWc8WwF92MokhKLRZ1vVA,499
 qadence/transpile/apply_fn.py,sha256=glZo2_wMOjw7_KgWKYbqg8j-9SDs-RefWIfxWgdQK8I,1336
 qadence/transpile/block.py,sha256=jV-EyatrwwdL2ahjF3wyEhC3PKMBPLaL5sQN1VNFc_w,11582
@@ -139,7 +141,7 @@ qadence/transpile/flatten.py,sha256=k4HAfVzvDV40HyfaukiEHyJtAtvFRIcyDbAWiCL8tf0,
 qadence/transpile/invert.py,sha256=IeyidgBwECCKB0i7Ym0KkLyfcx42LyT2mbqkfbK1H8M,4843
 qadence/transpile/noise.py,sha256=LDcDJtQGkgUPkL2t69gg6AScTb-p3J3SxCDZbYOu1L8,1668
 qadence/transpile/transpile.py,sha256=xnzkHA6Qdb-Y5Fv9Latrolrpw44N6_OKc7_QGt70f0I,2713
-qadence-1.10.
-qadence-1.10.
-qadence-1.10.
-qadence-1.10.
+qadence-1.10.3.dist-info/METADATA,sha256=fytA26nk5vWG6gnLJZHXngs9fKcLuVpafBiLZH2yQCQ,10191
+qadence-1.10.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+qadence-1.10.3.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+qadence-1.10.3.dist-info/RECORD,,
{qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/WHEEL
File without changes
{qadence-1.10.1.dist-info → qadence-1.10.3.dist-info}/licenses/LICENSE
File without changes