qadence-1.6.0-py3-none-any.whl → qadence-1.6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/backends/horqrux/backend.py +19 -31
- qadence/backends/pulser/backend.py +14 -3
- qadence/backends/pulser/convert_ops.py +20 -7
- qadence/backends/pyqtorch/convert_ops.py +40 -16
- qadence/blocks/block_to_tensor.py +3 -2
- qadence/blocks/primitive.py +2 -1
- qadence/measurements/shadow.py +3 -16
- qadence/ml_tools/config.py +11 -1
- qadence/ml_tools/printing.py +1 -3
- qadence/ml_tools/saveload.py +11 -3
- qadence/ml_tools/train_grad.py +36 -3
- qadence/operations/ham_evo.py +3 -5
- qadence/overlap.py +7 -12
- qadence/states.py +20 -5
- qadence/utils.py +40 -1
- {qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/METADATA +6 -8
- {qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/RECORD +19 -19
- {qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/WHEEL +0 -0
- {qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/licenses/LICENSE +0 -0
qadence/backends/horqrux/backend.py
CHANGED
@@ -195,28 +195,6 @@ class Backend(BackendInterface):
         if n_shots < 1:
             raise ValueError("You can only call sample with n_shots>0.")
 
-        def _sample(
-            _probs: ArrayLike, n_shots: int, endianness: Endianness, n_qubits: int
-        ) -> Counter:
-            _logits = jax.vmap(lambda _p: jnp.log(_p / (1 - _p)))(_probs)
-
-            def _smple(accumulator: ArrayLike, i: int) -> tuple[ArrayLike, None]:
-                accumulator = accumulator.at[i].set(
-                    jax.random.categorical(jax.random.PRNGKey(i), _logits)
-                )
-                return accumulator, None
-
-            samples = jax.lax.scan(
-                _smple, jnp.empty_like(jnp.arange(n_shots)), jnp.arange(n_shots)
-            )[0]
-            return Counter(
-                {
-                    int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item()
-                    for k, count in enumerate(jnp.bincount(samples))
-                    if count > 0
-                }
-            )
-
         wf = self.run(
             circuit=circuit,
             param_values=param_values,
@@ -225,16 +203,26 @@
             unhorqify_state=False,
         )
         probs = jnp.abs(jnp.float_power(wf, 2.0)).ravel()
-
-
-
-
-
-
-        )
-
+        key = jax.random.PRNGKey(0)
+        # JAX handles pseudo random number generation by tracking an explicit state via a random key
+        # For more details, see https://jax.readthedocs.io/en/latest/random-numbers.html
+        samples = jax.vmap(
+            lambda subkey: jax.random.choice(
+                key=subkey, a=jnp.arange(0, 2**circuit.abstract.n_qubits), p=probs
+            )
+        )(jax.random.split(key, n_shots))
 
-        return
+        return [
+            Counter(
+                {
+                    int_to_basis(
+                        k=k, n_qubits=circuit.abstract.n_qubits, endianness=endianness
+                    ): count.item()
+                    for k, count in enumerate(jnp.bincount(samples))
+                    if count > 0
+                }
+            )
+        ]
 
     def assign_parameters(self, circuit: ConvertedCircuit, param_values: ParamDictType) -> Any:
         raise NotImplementedError
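The new sampling path replaces the logit/`jax.lax.scan` loop with key splitting plus `jax.random.choice`. A standalone sketch of the same pattern (illustrative values, not qadence code):

```python
from collections import Counter

import jax
import jax.numpy as jnp

n_qubits, n_shots = 2, 1000
probs = jnp.array([0.5, 0.0, 0.0, 0.5])  # stand-in for |wf|^2 of a 2-qubit state

# One subkey per shot; each subkey draws one basis-state index from probs.
key = jax.random.PRNGKey(0)
samples = jax.vmap(
    lambda subkey: jax.random.choice(key=subkey, a=jnp.arange(0, 2**n_qubits), p=probs)
)(jax.random.split(key, n_shots))

# Tally the indices into a Counter, mirroring the bincount comprehension above.
counts = Counter({k: int(c) for k, c in enumerate(jnp.bincount(samples)) if c > 0})
print(counts)  # e.g. Counter({0: 513, 3: 487})
```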
qadence/backends/pulser/backend.py
CHANGED
@@ -264,7 +264,10 @@ class Backend(BackendInterface):
         if isinstance(noise_probs, Iterable):
             noisy_batched_dms = []
             for noise_prob in noise_probs:
-
+                noisy_sim = run_noisy_sim(noise_prob)
+                if not param_values:
+                    noisy_sim = noisy_sim[0]
+                noisy_batched_dms.append(noisy_sim)
             noisy_batched_dms = torch.stack(noisy_batched_dms)
         else:
             noisy_batched_dms = run_noisy_sim(noise_probs)
@@ -350,10 +353,18 @@
         # TODO: There should be a better check for batched density matrices.
         if dms.size()[0] > 1:
             res_list = [
-                [
+                [
+                    obs.native(
+                        dm.squeeze(), param_values, qubit_support=support, noise=noise
+                    )
+                    for dm in dms
+                ]
                 for obs in observable
             ]
-            res = torch.stack(
+            res = torch.stack(
+                [torch.transpose(torch.stack(res), 0, -1) for res in res_list]
+            )
+
         else:
             res_list = [
                 obs.native(dms, param_values, qubit_support=support) for obs in observable
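For the batched branch, the nested comprehension yields one expectation per (observable, density matrix) pair; a shape-only sketch with dummy tensors standing in for the `obs.native(...)` outputs:

```python
import torch

n_obs, batch = 2, 3
# Stand-ins for the obs.native(dm.squeeze(), ...) results, one per (observable, dm).
res_list = [[torch.tensor([1.0 * (o + b)]) for b in range(batch)] for o in range(n_obs)]

res = torch.stack([torch.transpose(torch.stack(res), 0, -1) for res in res_list])
print(res.shape)  # torch.Size([2, 1, 3]): observables x values x batch
```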
qadence/backends/pulser/convert_ops.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Sequence
 import torch
 from torch.nn import Module
 
+from qadence import Noise
 from qadence.blocks import (
     AbstractBlock,
 )
@@ -26,17 +27,29 @@ class PulserObservable(Module):
     def __init__(self, block: AbstractBlock, n_qubits: int | None):
         super().__init__()
         self.block = block
-        self.n_qubits = n_qubits
+        self.n_qubits = n_qubits if n_qubits else max(block.qubit_support) + 1
+
+        if not self.block.is_parametric:
+            block_mat = block_to_tensor(
+                self.block, {}, qubit_support=tuple(i for i in range(self.n_qubits))
+            ).squeeze(0)
+            self.register_buffer("block_mat", block_mat)
 
     def forward(
         self,
         state: torch.Tensor,
-        values: dict[str, torch.Tensor]
+        values: dict[str, torch.Tensor] = dict(),
         qubit_support: tuple | None = None,
+        noise: Noise | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> torch.Tensor:
-
-
-
-
-
+        if not self.block.is_parametric:
+            block_mat = self.block_mat
+        else:
+            block_mat = block_to_tensor(
+                self.block, values, qubit_support=qubit_support, endianness=endianness  # type: ignore [arg-type] # noqa
+            ).squeeze(0)
+        if noise is None:  # Compute expectations for state vector.
+            return torch.sum(torch.matmul(state, block_mat) * state.conj(), dim=1)
+        else:  # Compute expectations for density matrices.
+            return torch.trace(torch.matmul(block_mat, state))
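The two `forward` branches compute ⟨ψ|M|ψ⟩ for state vectors and Tr(Mρ) for density matrices. A numeric sketch with a fixed one-qubit observable (illustrative, not the qadence API):

```python
import torch

block_mat = torch.tensor([[1.0, 0.0], [0.0, -1.0]], dtype=torch.complex128)  # Z

# noise is None: batched state vectors of shape (batch, 2**n_qubits).
states = torch.tensor([[1.0, 0.0], [0.0, 1.0]], dtype=torch.complex128)  # |0>, |1>
print(torch.sum(torch.matmul(states, block_mat) * states.conj(), dim=1).real)  # [ 1., -1.]

# noise set: a single density matrix, with the expectation taken as a trace.
rho = 0.5 * torch.eye(2, dtype=torch.complex128)  # maximally mixed state
print(torch.trace(torch.matmul(block_mat, rho)).real)  # 0.
```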
qadence/backends/pyqtorch/convert_ops.py
CHANGED
@@ -4,7 +4,7 @@ from functools import reduce
 from itertools import chain as flatten
 from math import prod
 from operator import add
-from typing import Any, Sequence, Tuple
+from typing import Any, Iterable, Sequence, Tuple
 
 import pyqtorch as pyq
 import sympy
@@ -203,20 +203,36 @@ class PyQComposedBlock(pyq.QuantumCircuit):
         n_qubits: int,
         config: Configuration = None,
     ):
-        """
+        """
+        Merge operations that are adjacent and have identical qubit_support.
 
-        call
+        It results in fewer calls of apply_operator.
         """
         super().__init__(n_qubits, ops)
         self.qubits = qubits
+        self.merged_qubits_support = [
+            grouped_op[-1].qubit_support for grouped_op in self.grouped_operations()
+        ]
 
-    def
-
-
-
-
+    def grouped_operations(self) -> list[list[Module]]:
+        # Takes a list of operations and groups adjacent operations into sublists
+        # if those operations have the same qubit_support.
+        def _sublist_grouper(x: Iterable[list[Module]], y: Module) -> list[list[Module]]:
+            # Appends the element y to the last sublist in the list x
+            # if they have the same qubit_support.
+            # Appends the element y as a new sublist to x if it has a different qubit_support.
+            x = list(x)
+            if y.qubit_support == x[-1][-1].qubit_support:
+                x[-1].append(y)
+                return x
+            else:
+                x.append([y])
+                return x
 
-
+        return list(reduce(_sublist_grouper, iter(self.operations[1:]), [[self.operations[0]]]))
+
+    def merged_unitary(self, values: dict[str, Tensor] | None, batch_size: int) -> list[Tensor]:
+        # Compute the tensor multiplication of each group of operations.
         batch_first_perm = (2, 0, 1)
         undo_perm = tuple(argsort(tensor(batch_first_perm)))
 
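The grouping itself is a plain `functools.reduce` over the operation list; the same logic on tuples instead of pyqtorch modules (hypothetical op names):

```python
from functools import reduce

# (name, qubit_support) stand-ins for pyqtorch operations.
ops = [("RX", (0,)), ("RY", (0,)), ("CNOT", (0, 1)), ("RZ", (1,)), ("RX", (1,))]

def _sublist_grouper(x: list, y: tuple) -> list:
    # Extend the last sublist if y acts on the same qubits, else open a new one.
    if y[1] == x[-1][-1][1]:
        x[-1].append(y)
    else:
        x.append([y])
    return x

grouped = reduce(_sublist_grouper, ops[1:], [[ops[0]]])
print(grouped)
# [[('RX', (0,)), ('RY', (0,))], [('CNOT', (0, 1))], [('RZ', (1,)), ('RX', (1,))]]
```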
@@ -225,7 +241,7 @@ class PyQComposedBlock(pyq.QuantumCircuit):
                 m = m.unsqueeze(2).repeat(
                     1, 1, batch_size
                 )  # Primitive gates are 2D, so we expand them.
-            elif m.shape != (2, 2, batch_size):
+            elif m.shape != (2, 2, batch_size) and m.shape != (4, 4, batch_size):
                 m = m.repeat(1, 1, batch_size)  # In case a tensor is 3D and doesn't have batch_size.
             return m
 
@@ -237,13 +253,21 @@ class PyQComposedBlock(pyq.QuantumCircuit):
                 m, undo_perm
             )  # We need to undo the permute since PyQ expects (2, 2, batch_size).
 
-
+        def _list_wise_bmm(ops: list[Module]) -> Tensor:
+            # Takes a list of operations and applies torch.bmm to all the unitaries in the list.
+            return _batch_last(
+                reduce(bmm, [_batch_first(_expand(op.unitary(values))) for op in reversed(ops)])
+            )  # We reverse the list of tensors here since matmul is not commutative.
 
-        return
-
-
-
-
+        return list(map(_list_wise_bmm, reversed(self.grouped_operations())))[::-1]
+
+    def forward(self, state: Tensor, values: dict[str, Tensor] | None = None) -> Tensor:
+        # Compute the evolution of the state under the list of operations.
+        batch_size = infer_batchsize(values)
+        return reduce(
+            lambda y, x: apply_operator(state=y, operator=x[0], qubits=x[1]),
+            zip(self.merged_unitary(values, batch_size), self.merged_qubits_support),
+            state,
         )
 
 
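Within each group the unitaries are contracted with `torch.bmm` in reversed list order, since applying U1 then U2 to a state corresponds to the single matrix U2 @ U1. A minimal check:

```python
import torch

X = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
H = torch.tensor([[1.0, 1.0], [1.0, -1.0]]) / 2**0.5
state = torch.tensor([1.0, 0.0])

# Apply H first, then X, one operator at a time...
sequential = X @ (H @ state)
# ...which equals a single merged operator built in reversed order: X @ H.
merged = (X @ H) @ state
print(torch.allclose(sequential, merged))  # True
```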
qadence/blocks/block_to_tensor.py
CHANGED
@@ -181,12 +181,13 @@ def _controlled_block_with_params(
         AbstractBlock: redefined controlled rotation block
         dict with new parameters which are added
     """
-    from qadence.operations import I
+    from qadence.operations import I
+    from qadence.utils import P1
 
     # redefine controlled rotation block in a way suitable for matrix evaluation
    control = block.qubit_support[:-1]
    target = block.qubit_support[-1]
-    p1 = kron(
+    p1 = kron(P1(qubit) for qubit in control)
     p0 = I(control[0]) - p1
     c_block = kron(p0, I(target)) + kron(p1, block.blocks[0])
 
qadence/blocks/primitive.py
CHANGED
@@ -20,7 +20,6 @@ from qadence.parameters import (
     stringify,
 )
 from qadence.types import TParameter
-from qadence.utils import format_parameter
 
 
 class PrimitiveBlock(AbstractBlock):
@@ -258,6 +257,8 @@ class ScaleBlock(ParametricBlock):
 
     @property
     def _block_title(self) -> str:
+        from qadence.utils import format_parameter
+
         (scale,) = self.parameters.expressions()
         s = rf"\[mul: {format_parameter(scale)}] "
         return s
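This hunk is one of several in this release (see also the deferred `eigenvalues` import in qadence/operations/ham_evo.py and the deferred `fidelity` import in qadence/states.py below) that move a module-level import into the single function that uses it, presumably to break a circular import between `qadence.utils` and the block/operation modules.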
qadence/measurements/shadow.py
CHANGED
@@ -10,13 +10,7 @@ from torch import Tensor
 from qadence.backend import Backend
 from qadence.backends.pyqtorch import Backend as PyQBackend
 from qadence.blocks import AbstractBlock, chain, kron
-from qadence.blocks.block_to_tensor import (
-    HMAT,
-    IMAT,
-    SDAGMAT,
-    ZMAT,
-    block_to_tensor,
-)
+from qadence.blocks.block_to_tensor import HMAT, IMAT, SDAGMAT, ZMAT, block_to_tensor
 from qadence.blocks.composite import CompositeBlock
 from qadence.blocks.primitive import PrimitiveBlock
 from qadence.blocks.utils import get_pauli_blocks, unroll_block_with_scaling
@@ -24,8 +18,8 @@ from qadence.circuit import QuantumCircuit
 from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.noise import Noise
 from qadence.operations import X, Y, Z
-from qadence.states import one_state, zero_state
 from qadence.types import Endianness
+from qadence.utils import P0_MATRIX, P1_MATRIX
 
 pauli_gates = [X, Y, Z]
 
@@ -37,13 +31,6 @@ UNITARY_TENSOR = [
 ]
 
 
-# Projector matrices in Big-Endian convention.
-PROJECTOR_MATRICES = {
-    "0": zero_state(n_qubits=1).t() @ zero_state(n_qubits=1),
-    "1": one_state(n_qubits=1).t() @ one_state(n_qubits=1),
-}
-
-
 def identity(n_qubits: int) -> Tensor:
     return torch.eye(2**n_qubits, dtype=torch.complex128)
 
@@ -113,7 +100,7 @@ def local_shadow(sample: Counter, unitary_ids: list) -> Tensor:
     bitstring = list(sample.keys())[0]
     local_density_matrices = []
     for bit, unitary_id in zip(bitstring, unitary_ids):
-        proj_mat = PROJECTOR_MATRICES[bit]
+        proj_mat = P0_MATRIX if bit == "0" else P1_MATRIX
         unitary_tensor = UNITARY_TENSOR[unitary_id].squeeze(dim=0)
         local_density_matrices.append(
             3 * (unitary_tensor.adjoint() @ proj_mat @ unitary_tensor) - identity(1)
qadence/ml_tools/config.py
CHANGED
@@ -38,6 +38,16 @@ class TrainConfig:
     """
     checkpoint_best_only: bool = False
     """Write model/optimizer checkpoint only if a metric has improved."""
+    val_every: int | None = None
+    """Calculate validation metric.
+
+    If None, validation check is not performed.
+    """
+    val_epsilon: float = 1e-5
+    """Safety margin to check if validation loss is smaller than the lowest.
+
+    validation loss across previous iterations.
+    """
     validation_criterion: Optional[Callable] = None
     """A boolean function which evaluates a given validation metric is satisfied."""
     trainstop_criterion: Optional[Callable] = None
@@ -59,4 +69,4 @@ class TrainConfig:
         if self.trainstop_criterion is None:
             self.trainstop_criterion = lambda x: x <= self.max_iter
         if self.validation_criterion is None:
-            self.validation_criterion = lambda x: False
+            self.validation_criterion = lambda *x: False
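A usage sketch of the new fields (the `TrainConfig` import path is assumed; the criterion signature matches the three arguments passed in qadence/ml_tools/train_grad.py below):

```python
from qadence.ml_tools import TrainConfig  # import path assumed

config = TrainConfig(
    max_iter=1000,
    val_every=100,             # run a validation step every 100 iterations
    val_epsilon=1e-5,          # tolerance when comparing against the best loss so far
    checkpoint_best_only=True,
    validation_criterion=lambda val_loss, best, eps: val_loss < (best - eps),
)
```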
qadence/ml_tools/printing.py
CHANGED
@@ -11,9 +11,7 @@ def print_metrics(loss: float | None, metrics: dict, iteration: int) -> None:
     print(msg)
 
 
-def write_tensorboard(
-    writer: SummaryWriter, loss: float | None, metrics: dict, iteration: int
-) -> None:
+def write_tensorboard(writer: SummaryWriter, loss: float, metrics: dict, iteration: int) -> None:
     writer.add_scalar("loss", loss, iteration)
     for key, arg in metrics.items():
         writer.add_scalar(key, arg, iteration)
qadence/ml_tools/saveload.py
CHANGED
@@ -53,7 +53,7 @@ def load_checkpoint(
 
 
 def write_checkpoint(
-    folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int
+    folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int | str
 ) -> None:
     from qadence.ml_tools.models import TransformedModule
     from qadence.models import QNN, QuantumModel
@@ -64,11 +64,19 @@ def write_checkpoint(
         device = str(model.device).split(":")[0]  # in case of using several CUDA devices
     except Exception:
         pass
+
+    iteration_substring = f"{iteration:03n}" if isinstance(iteration, int) else iteration
     model_checkpoint_name: str = (
-        f"model_{type(model).__name__}_ckpt_"
+        f"model_{type(model).__name__}_ckpt_"
+        + f"{iteration_substring}"
+        + f"_device_{device}"
+        + ".pt"
     )
     opt_checkpoint_name: str = (
-        f"opt_{type(optimizer).__name__}_ckpt_"
+        f"opt_{type(optimizer).__name__}_ckpt_"
+        + f"{iteration_substring}"
+        + f"_device_{device}"
+        + ".pt"
     )
     try:
         d = (
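The `iteration: int | str` widening exists so the training loop can write a `"best"` checkpoint; a quick sketch of the resulting names (hypothetical model and device values):

```python
def iteration_substring(iteration: int | str) -> str:
    # Mirrors the logic above: zero-pad integers, pass strings through.
    return f"{iteration:03n}" if isinstance(iteration, int) else iteration

print(f"model_QNN_ckpt_{iteration_substring(7)}_device_cpu.pt")
# model_QNN_ckpt_007_device_cpu.pt
print(f"model_QNN_ckpt_{iteration_substring('best')}_device_cpu.pt")
# model_QNN_ckpt_best_device_cpu.pt
```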
qadence/ml_tools/train_grad.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import math
 from logging import getLogger
 from typing import Callable, Union
 
@@ -13,7 +14,7 @@ from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
 from qadence.ml_tools.config import TrainConfig
-from qadence.ml_tools.data import DictDataLoader
+from qadence.ml_tools.data import DictDataLoader, data_to_device
 from qadence.ml_tools.optimize_step import optimize_step
 from qadence.ml_tools.printing import print_metrics, write_tensorboard
 from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint
@@ -125,6 +126,22 @@ def train(
     # initialize tensorboard
     writer = SummaryWriter(config.folder, purge_step=init_iter)
 
+    perform_val = isinstance(config.val_every, int)
+    if perform_val:
+        if not isinstance(dataloader, DictDataLoader):
+            raise ValueError(
+                "If `config.val_every` is provided as an integer, dataloader must"
+                "be an instance of `DictDataLoader`."
+            )
+        iter_keys = dataloader.dataloaders.keys()
+        if "train" not in iter_keys or "val" not in iter_keys:
+            raise ValueError(
+                "If `config.val_every` is provided as an integer, the dictdataloader"
+                "must have `train` and `val` keys to access the respective dataloaders."
+            )
+        val_dataloader = dataloader.dataloaders["val"]
+        dataloader = dataloader.dataloaders["train"]
+
     ## Training
     progress = Progress(
         TextColumn("[progress.description]{task.description}"),
@@ -135,8 +152,12 @@ def train(
     data_dtype = None
     if dtype:
         data_dtype = float64 if dtype == complex128 else float32
+
+    best_val_loss = math.inf
     with progress:
         dl_iter = iter(dataloader) if dataloader is not None else None
+        if perform_val:
+            dl_iter_val = iter(val_dataloader) if val_dataloader is not None else None
 
         # outer epoch loop
         for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
@@ -177,8 +198,20 @@ def train(
             if iteration % config.write_every == 0:
                 write_tensorboard(writer, loss, metrics, iteration)
 
+            if perform_val:
+                if iteration % config.val_every == 0:
+                    xs = next(dl_iter_val)
+                    xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+                    val_loss, _ = loss_fn(model, xs_to_device)
+                    if config.validation_criterion(val_loss, best_val_loss, config.val_epsilon):  # type: ignore[misc]
+                        best_val_loss = val_loss
+                        if config.folder and config.checkpoint_best_only:
+                            write_checkpoint(config.folder, model, optimizer, iteration="best")
+                    metrics["val_loss"] = val_loss
+                    write_tensorboard(writer, math.nan, metrics, iteration)
+
             if config.folder:
-                if iteration % config.checkpoint_every == 0:
+                if iteration % config.checkpoint_every == 0 and not config.checkpoint_best_only:
                     write_checkpoint(config.folder, model, optimizer, iteration)
 
         except KeyboardInterrupt:
@@ -186,7 +219,7 @@ def train(
             break
 
     # Final writing and checkpointing
-    if config.folder:
+    if config.folder and not config.checkpoint_best_only:
         write_checkpoint(config.folder, model, optimizer, iteration)
     write_tensorboard(writer, loss, metrics, iteration)
     writer.close()
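A sketch of the dataloader shape the new validation branch expects (assuming `DictDataLoader` and `to_dataloader` from `qadence.ml_tools`; names and data are illustrative):

```python
import torch

from qadence.ml_tools import DictDataLoader, to_dataloader  # imports assumed

x = torch.rand(100, 1)
y = torch.sin(x)

# val_every requires a DictDataLoader with exactly these two keys.
dataloader = DictDataLoader(
    {
        "train": to_dataloader(x[:80], y[:80], batch_size=10, infinite=True),
        "val": to_dataloader(x[80:], y[80:], batch_size=10, infinite=True),
    }
)
```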
qadence/operations/ham_evo.py
CHANGED
@@ -10,10 +10,7 @@ import sympy
 import torch
 from torch import Tensor
 
-from qadence.blocks import (
-    AbstractBlock,
-    TimeEvolutionBlock,
-)
+from qadence.blocks import AbstractBlock, TimeEvolutionBlock
 from qadence.blocks.block_to_tensor import block_to_tensor
 from qadence.blocks.utils import (
     add,  # noqa
@@ -30,7 +27,6 @@ from qadence.parameters import (
     extract_original_param_entry,
 )
 from qadence.types import LTSOrder, OpName, TGenerator, TParameter
-from qadence.utils import eigenvalues
 
 logger = getLogger(__name__)
 
@@ -112,6 +108,8 @@ class HamEvo(TimeEvolutionBlock):
     def eigenvalues_generator(
         self, max_num_evals: int | None = None, max_num_gaps: int | None = None
     ) -> Tensor:
+        from qadence.utils import eigenvalues
+
         if isinstance(self.generator, AbstractBlock):
             generator_tensor = block_to_tensor(self.generator)
         elif isinstance(self.generator, Tensor):
qadence/overlap.py
CHANGED
@@ -14,21 +14,20 @@ from qadence.circuit import QuantumCircuit
 from qadence.divergences import js_divergence
 from qadence.measurements import Measurements
 from qadence.models.quantum_model import QuantumModel
-from qadence.operations import SWAP, H, I, S
+from qadence.operations import SWAP, H, I, S
 from qadence.transpile import reassign
 from qadence.types import BackendName, DiffMode, OverlapMethod
+from qadence.utils import P0, P1
 
 # Modules to be automatically added to the qadence namespace
 __all__ = ["Overlap", "OverlapMethod"]
 
 
 def _cswap(control: int, target1: int, target2: int) -> AbstractBlock:
-    # define projectors on control qubit
-    p0 = 0.5 * I(control) + 0.5 * Z(control)
-    p1 = 0.5 * I(control) + (-0.5) * Z(control)
-
     # construct controlled-SWAP block
-    cswap_blocks = kron(
+    cswap_blocks = kron(P0(control), I(target1), I(target2)) + kron(
+        P1(control), SWAP(target1, target2)
+    )
     cswap = tag(cswap_blocks, f"CSWAP({control}, {target1}, {target2})")
 
     return cswap
@@ -37,16 +36,12 @@ def _cswap(control: int, target1: int, target2: int) -> AbstractBlock:
 def _controlled_unitary(control: int, unitary_block: AbstractBlock) -> AbstractBlock:
     n_qubits = unitary_block.n_qubits
 
-    # define projectors on control qubit
-    p0 = 0.5 * I(control) + 0.5 * Z(control)
-    p1 = 0.5 * I(control) + (-0.5) * Z(control)
-
     # shift qubit support of unitary
     shifted_unitary_block = reassign(unitary_block, {i: control + i + 1 for i in range(n_qubits)})
 
     # construct controlled-U block
-    cu_blocks = kron(
-
+    cu_blocks = kron(P0(control), *[I(control + i + 1) for i in range(n_qubits)]) + kron(
+        P1(control), shifted_unitary_block
     )
     cu = tag(cu_blocks, f"c-U({control}, {shifted_unitary_block.qubit_support})")
 
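The projector rewrite relies on the identities P0 = (I + Z)/2 and P1 = (I - Z)/2, so the new `P0`/`P1` blocks reproduce the removed expressions. A numeric check with plain tensors (not qadence blocks):

```python
import torch

I = torch.eye(2, dtype=torch.complex128)
Z = torch.tensor([[1.0, 0.0], [0.0, -1.0]], dtype=torch.complex128)
P0 = torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128)  # |0><0|
P1 = torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128)  # |1><1|

assert torch.allclose(P0, 0.5 * I + 0.5 * Z)  # removed p0 definition
assert torch.allclose(P1, 0.5 * I - 0.5 * Z)  # removed p1 definition

# Same construction as _cswap: P0 (x) I (x) I + P1 (x) SWAP.
SWAP = torch.eye(4, dtype=torch.complex128)[[0, 2, 1, 3]]
cswap = torch.kron(P0, torch.eye(4, dtype=torch.complex128)) + torch.kron(P1, SWAP)
assert torch.allclose(cswap @ cswap, torch.eye(8, dtype=torch.complex128))  # involutory
```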
qadence/states.py
CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
 import random
+import warnings
 from functools import singledispatch
 from typing import List
 
 import torch
+from jax.typing import ArrayLike
 from torch import Tensor, concat
 from torch.distributions import Categorical, Distribution
 
@@ -12,7 +14,6 @@ from qadence.blocks import ChainBlock, KronBlock, PrimitiveBlock, chain, kron
 from qadence.circuit import QuantumCircuit
 from qadence.execution import run
 from qadence.operations import CNOT, RX, RY, RZ, H, I, X
-from qadence.overlap import fidelity
 from qadence.types import PI, BackendName, Endianness, StateGeneratorType
 from qadence.utils import basis_to_int
 
@@ -185,14 +186,18 @@ def one_state(n_qubits: int, batch_size: int = 1) -> Tensor:
 
 @singledispatch
 def product_state(
-    bitstring: str,
-
+    bitstring: str,
+    batch_size: int = 1,
+    endianness: Endianness = Endianness.BIG,
+    backend: str = "pyqtorch",
+) -> ArrayLike:
     """
     Creates a product state from a bitstring.
 
     Arguments:
         bitstring (str): A bitstring.
         batch_size (int) : Batch size.
+        backend (str): The backend to use. Default is "pyqtorch".
 
     Returns:
         A torch.Tensor.
@@ -201,10 +206,18 @@ def product_state(
     ```python exec="on" source="material-block" result="json"
     from qadence.states import product_state
 
-    print(product_state("1100"))
+    print(product_state("1100", backend="pyqtorch"))
+    print(product_state("1100", backend="horqrux"))
    ```
    """
-
+    if batch_size:
+        warnings.warn(
+            "The input `batch_size` is going to be deprecated. "
+            "For now, default batch_size is set to 1.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+    return run(product_block(bitstring), backend=backend, endianness=endianness)
 
 
 @product_state.register
@@ -529,6 +542,8 @@ def rand_bitstring(N: int) -> str:
 def equivalent_state(
     s0: torch.Tensor, s1: torch.Tensor, rtol: float = 0.0, atol: float = NORMALIZATION_ATOL
 ) -> bool:
+    from qadence.overlap import fidelity
+
     fid = fidelity(s0, s1)
     expected = torch.ones_like(fid)
     return torch.allclose(fid, expected, rtol=rtol, atol=atol)  # type: ignore[no-any-return]
qadence/utils.py
CHANGED
@@ -2,8 +2,9 @@ from __future__ import annotations
 
 import math
 from collections import Counter
+from functools import partial
 from logging import getLogger
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 import sympy
@@ -14,6 +15,10 @@ from torch.linalg import eigvals
 
 from qadence.types import Endianness, ResultType, TNumber
 
+if TYPE_CHECKING:
+    from qadence.operations import Projector
+
+
 # Modules to be automatically added to the qadence namespace
 __all__ = []  # type: ignore
 
@@ -259,3 +264,37 @@ def validate_values_and_state(
     else:
         if not is_qadence_shape(state, n_qubits) or state.shape[0] > 1:
             raise ValueError("Jax only supports unbatched states.")
+
+
+def one_qubit_projector(state: str, target: int) -> Projector:
+    """Returns the projector for a single qubit system.
+
+    Args:
+        state (str): The state of the projector.
+        target (int): The target qubit.
+
+    Returns:
+        Projector: The projector operator.
+    """
+    from qadence.operations import Projector
+
+    assert state in ["0", "1"], "State must be either '0' or '1'."
+    return Projector(ket=state, bra=state, qubit_support=target)
+
+
+def one_qubit_projector_matrix(state: str) -> Tensor:
+    """Returns the projector matrix for a single qubit system.
+
+    Args:
+        state (str): The state of the projector.
+
+    Returns:
+        Tensor: The projector matrix.
+    """
+    return one_qubit_projector(state, 0).tensor().squeeze()
+
+
+P0 = partial(one_qubit_projector, "0")
+P1 = partial(one_qubit_projector, "1")
+P0_MATRIX = one_qubit_projector_matrix("0")
+P1_MATRIX = one_qubit_projector_matrix("1")
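Usage sketch for the new helpers (outputs follow from the definitions above):

```python
from qadence.utils import P0, P1, P0_MATRIX, P1_MATRIX

proj = P1(3)       # Projector(ket="1", bra="1", qubit_support=3)
print(P0_MATRIX)   # tensor([[1.+0.j, 0.+0.j],
                   #         [0.+0.j, 0.+0.j]])
print(P1_MATRIX)   # tensor([[0.+0.j, 0.+0.j],
                   #         [0.+0.j, 1.+0.j]])
```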
{qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.6.0
+Version: 1.6.1
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>
 License: Apache 2.0
@@ -30,13 +30,11 @@ Requires-Dist: sympytorch>=0.1.2
 Requires-Dist: tensorboard>=2.12.0
 Requires-Dist: torch
 Provides-Extra: all
-Requires-Dist:
-Requires-Dist: graphviz; extra == 'all'
+Requires-Dist: braket; extra == 'all'
 Requires-Dist: libs; extra == 'all'
-Requires-Dist: pasqal-cloud==0.8.1; extra == 'all'
 Requires-Dist: protocols; extra == 'all'
-Requires-Dist: pulser
-Requires-Dist:
+Requires-Dist: pulser; extra == 'all'
+Requires-Dist: visualization; extra == 'all'
 Provides-Extra: braket
 Requires-Dist: amazon-braket-sdk<1.71.2; extra == 'braket'
 Provides-Extra: dlprof
@@ -56,8 +54,8 @@ Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
 Requires-Dist: pasqal-cloud==0.8.1; extra == 'pulser'
-Requires-Dist: pulser-core==0.18.
-Requires-Dist: pulser-simulation==0.18.
+Requires-Dist: pulser-core==0.18.1; extra == 'pulser'
+Requires-Dist: pulser-simulation==0.18.1; extra == 'pulser'
 Provides-Extra: visualization
 Requires-Dist: graphviz; extra == 'visualization'
 Description-Content-Type: text/markdown
{qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/RECORD
CHANGED
@@ -9,7 +9,7 @@ qadence/finitediff.py,sha256=TijuaWUbX9VlbLyMYco6HkK9eCoRTVnKug4Ekd6mlTI,1592
 qadence/libs.py,sha256=HetkKO8TCTlVCViQdVQJvxwBekrhd-y_iMox4UJMY1M,410
 qadence/log_config.yaml,sha256=WwSpxqMSXgPJ7wO_wh46UnFzXdgX9NVA4MbN3TcJFyE,485
 qadence/logger.py,sha256=Hb76pK3VyQjVjJb4_NqFlOJgjYJVa8t7DHJFlzOM86M,407
-qadence/overlap.py,sha256=
+qadence/overlap.py,sha256=VHEXfsqcn7aax1-f_iJ8Fe5Eoz4AcceQbwgqipifgzY,17104
 qadence/parameters.py,sha256=9BuEt2MZRwxhmPZHVPLt-GF1ZUgXkmg63owgDiJa74U,12325
 qadence/protocols.py,sha256=bcYTxSjgMPV-a-D6yv90jCpnGik8myzaNpFv9z1gzJ0,442
 qadence/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -17,9 +17,9 @@ qadence/qubit_support.py,sha256=Nkn1Q01RVViTcggSIom7EFKdWpAuM4TMGwBZ5feCUxA,2120
 qadence/register.py,sha256=cBMzwZ7GWZ5ieuFt0bpproEI6a2ncNwfjj7ic379zyg,10276
 qadence/serial_expr_grammar.peg,sha256=z5ytL7do9kO8o4h-V5GrsDuLdso0KsRcMuIYURFfmAY,328
 qadence/serialization.py,sha256=0UdcDQP2tJOtygVQI258G3MgnDiZJmBY4o15w0G-O0Y,15686
-qadence/states.py,sha256=
+qadence/states.py,sha256=XmywPGJw6vvWa0QkVegJHaYaimzXZQx42Uafo8HoTgo,14809
 qadence/types.py,sha256=wsxZNik5d8xdw46Dp1xRHpVS1-mSM-kMl2J_W64UhgA,9732
-qadence/utils.py,sha256=
+qadence/utils.py,sha256=mTDI54uXsFXrKKFshmXFzDdoaeLMLlqA1MR2G061TSc,10092
 qadence/analog/__init__.py,sha256=BCyS9R4KUjzUXN0Ax3b0eMo8ZAuSkGoJQVtZ4_pvAFs,279
 qadence/analog/addressing.py,sha256=fu5-xW9lquEbagApNp23S_ET1kl0iDtZUrIYSVNmw9s,6435
 qadence/analog/constants.py,sha256=B2phQoN1ASL8CwM-Dsa1rbraYwGwwPSeiB3HbVe-MPA,1243
@@ -37,31 +37,31 @@ qadence/backends/braket/backend.py,sha256=WX5FG4WsrtdnG0at2DvIY0n_AFm44t4g5OIJ1e
 qadence/backends/braket/config.py,sha256=7cu22dmYdp48Fu760HPfxBHinaUnGmzx9MkE_EPhVN8,594
 qadence/backends/braket/convert_ops.py,sha256=DVXV7sT9sX_yGOgPKclD9KIGgmbBRuDy_e39i1Z8I1s,3417
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/horqrux/backend.py,sha256=
+qadence/backends/horqrux/backend.py,sha256=W5sYvX9QP-xD3MMjwX-ZMcpHuncPVqBTyn80jgWViUM,9094
 qadence/backends/horqrux/config.py,sha256=xz7JlUcwW_4JAbvProbSI9hA1SXZRRAN0Hr2bvmLzfg,892
 qadence/backends/horqrux/convert_ops.py,sha256=nzfYF0yjB7zwaHCEXWZUUYDfz38Yi22xF2zDRFaOwR0,8564
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
-qadence/backends/pulser/backend.py,sha256=
+qadence/backends/pulser/backend.py,sha256=51lbX-KfK6wFxFW7t0QwsXXwAw06D6z2msvSZzM_vD8,15363
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
 qadence/backends/pulser/cloud.py,sha256=0uUluvbFV9sOuCPraE-9uiVtC3Q8QaDY1IJMDi8grDM,2057
 qadence/backends/pulser/config.py,sha256=aoHDmtgq5i0Zryxenw_p3uARY0B1w-UaYvfqDmrWHM0,2175
-qadence/backends/pulser/convert_ops.py,sha256=
+qadence/backends/pulser/convert_ops.py,sha256=of8NCZwHX0zSisRo-sBRSyQVg841iFKLhUm6WWZGsCY,1800
 qadence/backends/pulser/devices.py,sha256=DermLZNfmCB3SqteKVW4uhg4jp6ya1G6ptnXbBnJogI,2448
 qadence/backends/pulser/pulses.py,sha256=F4fExIRAhLPMtVg1bhNtDihUYHxu5RExGjovk8-CQIo,11884
 qadence/backends/pulser/waveforms.py,sha256=0uz95b7rUaUUtN0tuHBZmJ0H6UBmfHST_59ozwsRCzg,2227
 qadence/backends/pyqtorch/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
 qadence/backends/pyqtorch/backend.py,sha256=7CkXccCHwK3pijNDmIB_-hdRGa79yhZ3_3A2p7wOSB0,9785
 qadence/backends/pyqtorch/config.py,sha256=hhea1dDAeee7uDE1fiCh4lJRS0EMSc3mmbXn92HBdyA,1898
-qadence/backends/pyqtorch/convert_ops.py,sha256=
+qadence/backends/pyqtorch/convert_ops.py,sha256=0CLIncSmOSOZh4o7kjdOSc9a60srtegGul-dj_Q8Ufc,18935
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
 qadence/blocks/abstract.py,sha256=35RcVlNvD1BmBoJ8bbYJ3LrdU72wixt9ZmTbCtEwNus,11796
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
-qadence/blocks/block_to_tensor.py,sha256=
+qadence/blocks/block_to_tensor.py,sha256=Sg7YGKUoPUUHKvyB8Khztrk7UYnV5SD451_3I00n84w,17367
 qadence/blocks/composite.py,sha256=z_lXRBVnh-DdvfZdv6T0ZEmVhlU76zBt72P_FGGa-PQ,8897
 qadence/blocks/embedding.py,sha256=TQt620UIVaNYHP34tpK9slv-PFiLvTQRYw5Ez9RuImE,6513
 qadence/blocks/manipulate.py,sha256=kPmzej7mnWFoqTJA2CkGulT7hcPha0GGPARC8rjZltg,2387
 qadence/blocks/matrix.py,sha256=WTByjt_0yf3OiK-OcJNEfSGO8Hyq_tIBlklSO_BtOb0,3776
-qadence/blocks/primitive.py,sha256=
+qadence/blocks/primitive.py,sha256=NWxgpG77cjBy-APg-kAGOX2kOR-OuH8hzml8U_Zfv1A,16640
 qadence/blocks/utils.py,sha256=iCJDi6HTYYaQQCoP3cdIKeCDuy8KQCxctrHN5QWXV-M,16349
 qadence/constructors/__init__.py,sha256=PWfSKcEJmo5azIkcRuRWsmch3FOeXl055iPsboNzryQ,938
 qadence/constructors/ansatze.py,sha256=bTrcF2RsyA_Btmkk80tWxP1dn9fK_SXAQFueIuWkT-c,9660
@@ -97,7 +97,7 @@ qadence/exceptions/exceptions.py,sha256=4j_VJpx2sZ2Mir5BJUWu4nwb131FY1ygO4q8-Xly
 qadence/measurements/__init__.py,sha256=RIjG9tVJMqhNzyj7maZI250Um0KgHl2PizDcKJag-JU,161
 qadence/measurements/protocols.py,sha256=mD50R9yPs5bIYH7Efd0BsR0503apiyrsZydi_Q6BJag,1161
 qadence/measurements/samples.py,sha256=AVvszDwgfKnZ_ooATyTA3270vGeg1V3WO94jsfrTk-8,1200
-qadence/measurements/shadow.py,sha256=
+qadence/measurements/shadow.py,sha256=lYZWbBCJJh7pFXPV5jSvsyN_0g22ao3jARpKnx1jeJQ,12342
 qadence/measurements/tomography.py,sha256=8fzXhYOu_DaMiUoZzLvpP03WhuwlZ3ldkWepLUHjWqM,2665
 qadence/measurements/utils.py,sha256=CJmnSobzdeR4T4FuEpad7d-BSJ9W-wTaU9hRbveB6kY,6534
 qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-bs,159
@@ -105,15 +105,15 @@ qadence/mitigations/analog_zne.py,sha256=g0QkjSdF-N9Dv2N8Oza4sylnjUMid5ea-4NCT9T
 qadence/mitigations/protocols.py,sha256=Jq9MyLujfTyWmc7XVUGYVRUkJT1MmZw-GgmWpVjmX2Y,1608
 qadence/mitigations/readout.py,sha256=HPfYmdjRlieUdOBMZTghFK4DRWfveM4KkDkEI0bMI0E,6262
 qadence/ml_tools/__init__.py,sha256=_H5A_BWZRZVGoJszb9s8XRJnLnJxUNfYjuT9HT2yASo,786
-qadence/ml_tools/config.py,sha256=
+qadence/ml_tools/config.py,sha256=bA_9TYeuy0DPdhFg0_3PFctd6bx4hKugg4LmlUB2jyw,2647
 qadence/ml_tools/data.py,sha256=8ZUFjhQSp94w7icX7RzM2J39Yo7P_T-AgjcThBc8miI,4283
 qadence/ml_tools/models.py,sha256=lELcq__wfGsurUm2UOgYkOzdBDwu66Nik9ySoAjDKnY,12673
 qadence/ml_tools/optimize_step.py,sha256=ATXWmAqybJVK3QmAaDqVXB5mxjTo2MIi_e0a5WSPFpc,1800
 qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
-qadence/ml_tools/printing.py,sha256=
-qadence/ml_tools/saveload.py,sha256=
+qadence/ml_tools/printing.py,sha256=YK2zc9SOc5wiLnMxm3Q1gSwPAVW9Vy2Pcnjg9gP0aKU,694
+qadence/ml_tools/saveload.py,sha256=nKzEKaondgXX9pZaBmkUU3ccI92GbVRRMPfYCwOUJPk,5103
 qadence/ml_tools/tensors.py,sha256=xZ9ZRzOqEaMgLUGWQf1najDmL6iLuN1ojCGVFs1Tm94,1337
-qadence/ml_tools/train_grad.py,sha256=
+qadence/ml_tools/train_grad.py,sha256=6ZFFd7wGKFFqz3wpVkSD_xFjQPJ4IgIpA0IjYanohcs,9507
 qadence/ml_tools/train_no_grad.py,sha256=PrOfPwu6C-YqfFxnRkbeyOQzqSyjRrx4AZZd6C-1xRw,4705
 qadence/ml_tools/utils.py,sha256=_GZSN5Flk1nRFutkXih397Q3cWKdX0UP8c9CRXpUL7c,1654
 qadence/models/__init__.py,sha256=0nZzAC2TGr8Yuf40-R7m2cSsr_BlNq_GsMOwaOYZLqM,193
@@ -125,7 +125,7 @@ qadence/noise/readout.py,sha256=UpUdxaGu09SmqKXn0O7RYfF7b7UcRZiNMfDlpY0weV0,6726
 qadence/operations/__init__.py,sha256=HAAo9VZUTq2H7kcEarChTgTWCIq7LT25-xBxkwE0F9c,1922
 qadence/operations/analog.py,sha256=v11DSrg-XUbwIAWAWM43y3VQbYKsx2ynx-HimUoC-x0,7435
 qadence/operations/control_ops.py,sha256=ZDOmTXxQJXSP2ASNWNUlt7pIuEjAVNT2FmexbK_TisM,9484
-qadence/operations/ham_evo.py,sha256=
+qadence/operations/ham_evo.py,sha256=4KdIIkvkDZwoMs19qxDdNBsDC3W4keg33j1wZHXJNrE,7387
 qadence/operations/parametric.py,sha256=BHGGn8k7hIZX8_0V1K1_FOnILAROEtqZGjBdIfzMcWI,4911
 qadence/operations/primitive.py,sha256=ekiylIW7mWjesBXVyVmowF75Ek82T_eNUVcDTEAGzFg,9002
 qadence/transpile/__init__.py,sha256=lb5LwsYb6lN5YFBsU3YBey7-0OcUQpYa3Q4hG6zmgi0,457
@@ -136,7 +136,7 @@ qadence/transpile/digitalize.py,sha256=iWRwYAYQsD2INHj0HNbGJriv_3fRCuBW1nDBrwtKS
 qadence/transpile/flatten.py,sha256=EdhSG5WyF56nbnxINNLqrHgY84MRM1YFjT3fR4aph5Q,3427
 qadence/transpile/invert.py,sha256=KAefHTG2AWr39aengVhXrzCtJPhrZC-ZnL6vYvmbnY0,4867
 qadence/transpile/transpile.py,sha256=6MRRkk1OS279L1fwUQjazA6qlfpbd-T_EJMKT8hAhOU,2721
-qadence-1.6.
-qadence-1.6.
-qadence-1.6.
-qadence-1.6.
+qadence-1.6.1.dist-info/METADATA,sha256=Pxh7WJ1Ox0a0x-ydT-YDAABkP5RMC-AJx_oxbFQsSkU,9168
+qadence-1.6.1.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87
+qadence-1.6.1.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+qadence-1.6.1.dist-info/RECORD,,
{qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/WHEEL
File without changes
{qadence-1.6.0.dist-info → qadence-1.6.1.dist-info}/licenses/LICENSE
File without changes