qadence 1.8.0__py3-none-any.whl → 1.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/__init__.py +1 -1
- qadence/analog/parse_analog.py +1 -2
- qadence/backends/gpsr.py +8 -2
- qadence/backends/pulser/backend.py +7 -23
- qadence/backends/pyqtorch/backend.py +80 -5
- qadence/backends/pyqtorch/config.py +10 -3
- qadence/backends/pyqtorch/convert_ops.py +63 -2
- qadence/blocks/primitive.py +1 -0
- qadence/execution.py +0 -2
- qadence/log_config.yaml +10 -0
- qadence/measurements/shadow.py +97 -128
- qadence/measurements/utils.py +2 -2
- qadence/mitigations/readout.py +12 -6
- qadence/ml_tools/__init__.py +4 -8
- qadence/ml_tools/callbacks/__init__.py +30 -0
- qadence/ml_tools/callbacks/callback.py +451 -0
- qadence/ml_tools/callbacks/callbackmanager.py +214 -0
- qadence/ml_tools/{saveload.py → callbacks/saveload.py} +11 -11
- qadence/ml_tools/callbacks/writer_registry.py +441 -0
- qadence/ml_tools/config.py +132 -258
- qadence/ml_tools/data.py +7 -3
- qadence/ml_tools/loss/__init__.py +10 -0
- qadence/ml_tools/loss/loss.py +87 -0
- qadence/ml_tools/optimize_step.py +45 -10
- qadence/ml_tools/stages.py +46 -0
- qadence/ml_tools/train_utils/__init__.py +7 -0
- qadence/ml_tools/train_utils/base_trainer.py +555 -0
- qadence/ml_tools/train_utils/config_manager.py +184 -0
- qadence/ml_tools/trainer.py +708 -0
- qadence/model.py +1 -1
- qadence/noise/__init__.py +2 -2
- qadence/noise/protocols.py +18 -53
- qadence/operations/ham_evo.py +87 -26
- qadence/transpile/noise.py +12 -5
- qadence/types.py +15 -3
- {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/METADATA +3 -4
- {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/RECORD +39 -32
- {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/WHEEL +1 -1
- qadence/ml_tools/printing.py +0 -154
- qadence/ml_tools/train_grad.py +0 -395
- qadence/ml_tools/train_no_grad.py +0 -199
- qadence/noise/readout.py +0 -218
- {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/licenses/LICENSE +0 -0
qadence/measurements/shadow.py
CHANGED
```diff
@@ -1,40 +1,37 @@
 from __future__ import annotations
 
-from collections import Counter
-from functools import reduce
-
 import numpy as np
 import torch
 from torch import Tensor
 
 from qadence.backend import Backend
 from qadence.backends.pyqtorch import Backend as PyQBackend
-from qadence.blocks import AbstractBlock, chain, kron
-from qadence.blocks.block_to_tensor import HMAT, IMAT, SDAGMAT
+from qadence.blocks import AbstractBlock, KronBlock, chain, kron
+from qadence.blocks.block_to_tensor import HMAT, IMAT, SDAGMAT
 from qadence.blocks.composite import CompositeBlock
 from qadence.blocks.primitive import PrimitiveBlock
 from qadence.blocks.utils import get_pauli_blocks, unroll_block_with_scaling
 from qadence.circuit import QuantumCircuit
 from qadence.engines.differentiable_backend import DifferentiableBackend
+from qadence.measurements.utils import get_qubit_indices_for_op
 from qadence.noise import NoiseHandler
-from qadence.operations import X, Y, Z
+from qadence.operations import H, I, SDagger, X, Y, Z
 from qadence.types import Endianness
-from qadence.utils import P0_MATRIX, P1_MATRIX
 
 pauli_gates = [X, Y, Z]
-
+pauli_rotations = [
+    lambda index: H(index),
+    lambda index: SDagger(index) * H(index),
+    lambda index: None,
+]
 
 UNITARY_TENSOR = [
-
-
+    HMAT,
+    HMAT @ SDAGMAT,
     IMAT,
 ]
 
 
-def identity(n_qubits: int) -> Tensor:
-    return torch.eye(2**n_qubits, dtype=torch.complex128)
-
-
 def _max_observable_weight(observable: AbstractBlock) -> int:
     """
    Get the maximal weight for the given observable.
```
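The new `pauli_rotations` table maps index 0 to `H`, index 1 to `SDagger * H`, and index 2 to no rotation (Z basis), and `UNITARY_TENSOR` holds the matching matrices. A standalone NumPy check of that basis-change convention (no qadence imports; all matrices written out here):

```python
import numpy as np

# Check the convention behind pauli_rotations / UNITARY_TENSOR:
# conjugating X by H, and Y by H @ SDAG, yields Z.
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
SDAG = np.diag([1.0, -1.0j])
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1.0, -1.0])

assert np.allclose(H @ X @ H.conj().T, Z)   # H X H^dag = Z
U = H @ SDAG                                 # matches the new HMAT @ SDAGMAT entry
assert np.allclose(U @ Y @ U.conj().T, Z)    # (H S^dag) Y (H S^dag)^dag = Z
print("rotation convention checked")
```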
```diff
@@ -88,27 +85,40 @@ def number_of_samples(
     return N, K
 
 
-def
+def nested_operator_indexing(
+    idx_array: np.ndarray,
+) -> list:
+    """Obtain the list of rotation operators from indices.
+
+    Args:
+        idx_array (np.ndarray): Indices for obtaining the operators.
+
+    Returns:
+        list: Map of rotations.
     """
-
+    if idx_array.ndim == 1:
+        return [pauli_rotations[int(ind_pauli)](i) for i, ind_pauli in enumerate(idx_array)]  # type: ignore[abstract]
+    return [nested_operator_indexing(sub_array) for sub_array in idx_array]
+
+
+def kron_if_non_empty(list_operations: list) -> KronBlock | None:
+    filtered_op: list = list(filter(None, list_operations))
+    return kron(*filtered_op) if len(filtered_op) > 0 else None
 
-    See https://arxiv.org/pdf/2002.08953.pdf
-    Supplementary Material 1 and Eqs. (S17,S44).
 
-
+def extract_operators(unitary_ids: np.ndarray, n_qubits: int) -> list:
+    """Sample `shadow_size` rotations of `n_qubits`.
+
+    Args:
+        unitary_ids (np.ndarray): Indices for obtaining the operators.
+        n_qubits (int): Number of qubits
+    Returns:
+        list: Pauli strings.
     """
-
-
-
-
-    unitary_tensor = UNITARY_TENSOR[unitary_id].squeeze(dim=0)
-    local_density_matrices.append(
-        3 * (unitary_tensor.adjoint() @ proj_mat @ unitary_tensor) - identity(1)
-    )
-    if len(local_density_matrices) == 1:
-        return local_density_matrices[0]
-    else:
-        return reduce(torch.kron, local_density_matrices)
+    operations = nested_operator_indexing(unitary_ids)
+    if n_qubits > 1:
+        operations = [kron_if_non_empty(ops) for ops in operations]
+    return operations
 
 
 def classical_shadow(
```
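`nested_operator_indexing` walks the sampled index array recursively, and `kron_if_non_empty` filters out the `None` entries (qubits left in the Z basis) before building the Kron block. A toy mimic of the same recursion with strings in place of qadence blocks, just to show the output shape:

```python
import numpy as np

# Toy stand-in for pauli_rotations: index 0 -> H, 1 -> SDagger*H, 2 -> None.
names = [lambda q: f"H({q})", lambda q: f"SDagger({q})*H({q})", lambda q: None]

def mimic_nested_indexing(idx_array: np.ndarray) -> list:
    # Same recursion as nested_operator_indexing, one entry per qubit.
    if idx_array.ndim == 1:
        return [names[int(p)](q) for q, p in enumerate(idx_array)]
    return [mimic_nested_indexing(sub) for sub in idx_array]

unitary_ids = np.array([[0, 2], [1, 1], [2, 2]])  # 3 snapshots, 2 qubits
print(mimic_nested_indexing(unitary_ids))
# [['H(0)', None], ['SDagger(0)*H(0)', 'SDagger(1)*H(1)'], [None, None]]
```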
```diff
@@ -119,25 +129,21 @@ def classical_shadow(
     backend: Backend | DifferentiableBackend = PyQBackend(),
     noise: NoiseHandler | None = None,
     endianness: Endianness = Endianness.BIG,
-) -> list:
-
-
-
-
-
-
-
-
-
+) -> tuple[np.ndarray, list[Tensor]]:
+    unitary_ids = np.random.randint(0, 3, size=(shadow_size, circuit.n_qubits))
+    shadow: list = list()
+    all_rotations = extract_operators(unitary_ids, circuit.n_qubits)
+
+    for i in range(shadow_size):
+        if all_rotations[i]:
+            rotated_circuit = QuantumCircuit(
+                circuit.register, chain(circuit.block, all_rotations[i])
+            )
         else:
-
-            rotated_circuit = QuantumCircuit(
-                circuit.n_qubits,
-                chain(circuit.block, random_unitary_block),
-            )
+            rotated_circuit = circuit
         # Reverse endianness to get sample bitstrings in ILO.
         conv_circ = backend.circuit(rotated_circuit)
-
+        batch_samples = backend.sample(
             circuit=conv_circ,
             param_values=param_values,
             n_shots=1,
```
```diff
@@ -145,97 +151,61 @@ def classical_shadow(
             noise=noise,
             endianness=endianness,
         )
-
-
-
-
-
-
-
-
-
-
-def reconstruct_state(shadow: list) -> Tensor:
-    """Reconstruct the state density matrix for the given shadow."""
-    return reduce(torch.add, shadow) / len(shadow)
-
-
-def compute_traces(
-    qubit_support: tuple,
-    N: int,
-    K: int,
-    shadow: list,
-    observable: AbstractBlock,
-    endianness: Endianness = Endianness.BIG,
-) -> list:
-    floor = int(np.floor(N / K))
-    traces = []
-    # TODO: Parallelize embarrassingly parallel loop.
-    for k in range(K):
-        reconstructed_state = reconstruct_state(shadow=shadow[k * floor : (k + 1) * floor])
-        # Reshape the observable matrix to fit the density matrix dimensions
-        # by filling indentites.
-        # Please note the endianness is also flipped to get results in LE.
-        # FIXME: Changed below from Little to Big, double-check when Roland is back
-        # FIXME: Correct these comments.
-        trace = (
-            (
-                block_to_tensor(
-                    block=observable,
-                    qubit_support=qubit_support,
-                    endianness=Endianness.BIG,
-                ).squeeze(dim=0)
-                @ reconstructed_state
-            )
-            .trace()
-            .real
-        )
-        traces.append(trace)
-    return traces
+        shadow.append(batch_samples)
+    bitstrings = list()
+    batchsize = len(batch_samples)
+    for b in range(batchsize):
+        bitstrings.append([list(batch[b].keys())[0] for batch in shadow])
+    bitstrings_torch = [
+        1 - 2 * torch.stack([torch.tensor([int(b_i) for b_i in sample]) for sample in batch])
+        for batch in bitstrings
+    ]
+    return unitary_ids, bitstrings_torch
 
 
 def estimators(
-    qubit_support: tuple,
     N: int,
     K: int,
-
+    unitary_shadow_ids: np.ndarray,
+    shadow_samples: Tensor,
     observable: AbstractBlock,
-    endianness: Endianness = Endianness.BIG,
 ) -> Tensor:
     """
-    Return estimators
-
-    for K equally-sized shadow partitions.
+    Return trace estimators from the samples for K equally-sized shadow partitions.
 
     See https://arxiv.org/pdf/2002.08953.pdf
     Algorithm 1.
     """
-
-
+
+    obs_qubit_support = observable.qubit_support
     if isinstance(observable, PrimitiveBlock):
-        if
-
-
-
-            K=K,
-            shadow=shadow,
-            observable=observable,
-            endianness=endianness,
-        )
-    else:
-        traces = [torch.tensor(0.0)]
+        if isinstance(observable, I):
+            return torch.tensor(1.0, dtype=torch.get_default_dtype())
+        obs_to_pauli_index = [pauli_gates.index(type(observable))]
+
     elif isinstance(observable, CompositeBlock):
-
-
-
-
-
-
-
-
-
+        obs_to_pauli_index = [
+            pauli_gates.index(type(p)) for p in observable.blocks if not isinstance(p, I)  # type: ignore[arg-type]
+        ]
+        ind_I = set(get_qubit_indices_for_op((observable, 1.0), I(0)))
+        obs_qubit_support = tuple([ind for ind in observable.qubit_support if ind not in ind_I])
+
+    floor = int(np.floor(N / K))
+    traces = []
+    for k in range(K):
+        indices_match = np.all(
+            unitary_shadow_ids[k * floor : (k + 1) * floor, obs_qubit_support]
+            == obs_to_pauli_index,
+            axis=1,
+        )
+        if indices_match.sum() > 0:
+            trace = torch.prod(
+                shadow_samples[k * floor : (k + 1) * floor][indices_match][:, obs_qubit_support],
+                axis=-1,
+            ).sum() / sum(indices_match)
+            traces.append(trace)
         else:
-            traces
+            traces.append(torch.tensor(0.0))
     return torch.tensor(traces, dtype=torch.get_default_dtype())
 
 
```
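The rewritten `estimators` skips density-matrix reconstruction entirely: a snapshot contributes to a Pauli term only when its sampled rotation indices match the term's Paulis on the term's qubit support, and each partition's estimate is the mean of products of the corresponding ±1 eigenvalues (`classical_shadow` maps bits via `1 - 2 * bit`). A self-contained sketch of that matching rule on hypothetical data:

```python
import numpy as np
import torch

# Hypothetical shadow of N = 6 snapshots on 2 qubits, split into K = 2 parts.
unitary_ids = np.array([[2, 2], [0, 2], [2, 2], [2, 1], [2, 2], [2, 2]])
bits = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1], [0, 0], [1, 0]])
samples = 1 - 2 * bits            # 0 -> +1, 1 -> -1, as in classical_shadow

obs_support = [0, 1]              # estimate <Z0 Z1>
obs_index = [2, 2]                # Z is index 2 in pauli_gates
N, K = samples.shape[0], 2
floor = N // K
traces = []
for k in range(K):
    chunk_ids = unitary_ids[k * floor : (k + 1) * floor][:, obs_support]
    match = np.all(chunk_ids == obs_index, axis=1)   # snapshots measured in Z on both qubits
    if match.sum() > 0:
        chunk = samples[k * floor : (k + 1) * floor][torch.from_numpy(match)]
        traces.append(torch.prod(chunk[:, obs_support], dim=-1).float().mean())
    else:
        traces.append(torch.tensor(0.0))
print(torch.stack(traces))        # one mean per partition; the median is taken downstream
```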
```diff
@@ -258,7 +228,7 @@ def estimations(
     N, K = number_of_samples(observables=observables, accuracy=accuracy, confidence=confidence)
     if shadow_size is not None:
         N = shadow_size
-
+    unitaries_ids, batch_shadow_samples = classical_shadow(
         shadow_size=N,
         circuit=circuit,
         param_values=param_values,
```
```diff
@@ -271,18 +241,17 @@ def estimations(
     for observable in observables:
         pauli_decomposition = unroll_block_with_scaling(observable)
         batch_estimations = []
-        for batch in
+        for batch in batch_shadow_samples:
             pauli_term_estimations = []
             for pauli_term in pauli_decomposition:
                 # Get the estimators for the current Pauli term.
                 # This is a tensor<float> of size K.
                 estimation = estimators(
-                    qubit_support=circuit.block.qubit_support,
                     N=N,
                     K=K,
-
+                    unitary_shadow_ids=unitaries_ids,
+                    shadow_samples=batch,
                     observable=pauli_term[0],
-                    endianness=endianness,
                 )
                 # Compute the median of means for the current Pauli term.
                 # Weigh the median by the Pauli term scaling.
```
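Just below this hunk (unchanged code), each Pauli term's K partition estimates are reduced with a median and weighted by the term's scaling, following Algorithm 1 of https://arxiv.org/pdf/2002.08953.pdf. A toy illustration of that aggregation with made-up numbers:

```python
import torch

# Two Pauli terms of one observable: K = 3 partition means each (hypothetical),
# combined as scale-weighted medians of means.
term_estimations = [torch.tensor([0.9, 1.1, 1.0]), torch.tensor([-0.2, 0.0, -0.1])]
term_scales = [0.5, 2.0]
expectation = sum(s * torch.median(e) for s, e in zip(term_scales, term_estimations))
print(expectation)  # tensor(0.3000): 0.5 * 1.0 + 2.0 * (-0.1)
```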
qadence/measurements/utils.py
CHANGED
```diff
@@ -15,7 +15,7 @@ from qadence.blocks import AbstractBlock, PrimitiveBlock, chain
 from qadence.circuit import QuantumCircuit
 from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.noise import NoiseHandler
-from qadence.operations import H, SDagger, X, Y
+from qadence.operations import H, I, SDagger, X, Y
 from qadence.parameters import evaluate
 from qadence.utils import Endianness
 
@@ -113,7 +113,7 @@ def rotate(circuit: QuantumCircuit, pauli_term: Tuple[AbstractBlock, Basic]) ->
 
     rotations = []
 
-    for op, gate in [(X(0),
+    for op, gate in [(X(0), I), (Y(0), SDagger)]:
         qubit_indices = get_qubit_indices_for_op(pauli_term, op=op)
         for index in qubit_indices:
             rotations.append(gate(index) * H(index))
```
qadence/mitigations/readout.py
CHANGED
```diff
@@ -10,9 +10,10 @@ from numpy.linalg import inv, matrix_rank, pinv
 from scipy.linalg import norm
 from scipy.optimize import LinearConstraint, minimize
 
+from qadence.backends.pyqtorch.convert_ops import convert_readout_noise
 from qadence.mitigations.protocols import Mitigations
 from qadence.noise.protocols import NoiseHandler
-from qadence.types import
+from qadence.types import ReadOutOptimization
 
 
 def corrected_probas(p_corr: npt.NDArray, T: npt.NDArray, p_raw: npt.NDArray) -> np.double:
@@ -88,13 +89,18 @@ def mitigation_minimization(
     Returns:
         Mitigated counts computed by the algorithm
     """
-
-    if protocol != NoiseProtocol.READOUT:
-        raise ValueError("Specify a noise source of type NoiseProtocol.READOUT.")
-    noise_matrices = options.get("noise_matrix", options["confusion_matrices"])
-    optimization_type = mitigation.options.get("optimization_type", ReadOutOptimization.MLE)
+
     n_qubits = len(list(samples[0].keys())[0])
+    readout_noise = convert_readout_noise(n_qubits, noise)
+    if readout_noise is None:
+        raise ValueError("Specify a noise source of type NoiseProtocol.READOUT.")
     n_shots = sum(samples[0].values())
+    noise_matrices = readout_noise.confusion_matrix
+    if noise_matrices.numel() == 0:
+        readout_noise.create_noise_matrix(n_shots)
+        noise_matrices = readout_noise.confusion_matrix
+    optimization_type = mitigation.options.get("optimization_type", ReadOutOptimization.MLE)
+
     corrected_counters: list[Counter] = []
 
     if optimization_type == ReadOutOptimization.CONSTRAINED:
```
qadence/ml_tools/__init__.py
CHANGED
```diff
@@ -1,16 +1,14 @@
 from __future__ import annotations
 
-from .
+from .callbacks.saveload import load_checkpoint, load_model, write_checkpoint
+from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
-from .printing import print_metrics, write_tensorboard
-from .saveload import load_checkpoint, load_model, write_checkpoint
 from .tensors import numpy_to_tensor, promote_to, promote_to_tensor
-from .
-from .train_no_grad import train as train_gradient_free
+from .trainer import Trainer
 
 # Modules to be automatically added to the qadence namespace
 __all__ = [
@@ -24,8 +22,6 @@ __all__ = [
     "QNN",
     "TrainConfig",
     "OptimizeResult",
-    "
-    "train_with_grad",
-    "train_gradient_free",
+    "Trainer",
     "write_checkpoint",
 ]
```
qadence/ml_tools/callbacks/__init__.py
ADDED
```diff
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from .callback import (
+    Callback,
+    LoadCheckpoint,
+    LogHyperparameters,
+    LogModelTracker,
+    PlotMetrics,
+    PrintMetrics,
+    SaveBestCheckpoint,
+    SaveCheckpoint,
+    WriteMetrics,
+)
+from .callbackmanager import CallbacksManager
+from .writer_registry import get_writer
+
+# Modules to be automatically added to the qadence.ml_tools.callbacks namespace
+__all__ = [
+    "CallbacksManager",
+    "Callback",
+    "LoadCheckpoint",
+    "LogHyperparameters",
+    "LogModelTracker",
+    "PlotMetrics",
+    "PrintMetrics",
+    "SaveBestCheckpoint",
+    "SaveCheckpoint",
+    "WriteMetrics",
+    "get_writer",
+]
```
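These callback classes replace the ad-hoc printing/writing hooks removed with `printing.py`. A sketch of wiring one into a training config; the `on=` / `called_every=` constructor arguments and the `TrainConfig(callbacks=...)` field are assumptions to check against `callback.py` and `config.py`:

```python
from qadence.ml_tools import TrainConfig
from qadence.ml_tools.callbacks import PrintMetrics

# Assumed constructor arguments (hook name + frequency); verify in callback.py.
print_cb = PrintMetrics(on="train_epoch_end", called_every=10)

# Assumed TrainConfig field added in this release; verify in config.py.
config = TrainConfig(max_iter=500, callbacks=[print_cb])
```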
|