iqm-benchmarks 2.4__py3-none-any.whl → 2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of iqm-benchmarks might be problematic.
- iqm/benchmarks/__init__.py +1 -0
- iqm/benchmarks/compressive_gst/__init__.py +1 -1
- iqm/benchmarks/compressive_gst/compressive_gst.py +45 -32
- iqm/benchmarks/compressive_gst/gst_analysis.py +26 -22
- iqm/benchmarks/entanglement/ghz.py +32 -9
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +25 -25
- {iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/METADATA +14 -1
- {iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/RECORD +14 -14
- {iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/WHEEL +1 -1
- mGST/algorithm.py +27 -23
- mGST/low_level_jit.py +15 -25
- mGST/reporting/reporting.py +9 -6
- {iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/LICENSE +0 -0
- {iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/top_level.txt +0 -0
iqm/benchmarks/__init__.py
CHANGED
@@ -26,6 +26,7 @@ from .benchmark_definition import (
 BenchmarkRunResult,
 )
 from .circuit_containers import BenchmarkCircuit, CircuitGroup, Circuits
+from .compressive_gst.compressive_gst import CompressiveGST, GSTConfiguration
 from .entanglement.ghz import GHZBenchmark, GHZConfiguration
 from .quantum_volume.clops import CLOPSBenchmark, CLOPSConfiguration
 from .quantum_volume.quantum_volume import QuantumVolumeBenchmark, QuantumVolumeConfiguration
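The added import re-exports the GST benchmark classes from the package root. A minimal, hedged sketch of what that enables — only the import path follows from the diff; the commented-out constructor call and its arguments are illustrative assumptions:

```python
# With the new re-export in iqm/benchmarks/__init__.py, the GST classes can be
# imported directly from the package root:
from iqm.benchmarks import CompressiveGST, GSTConfiguration

# Hypothetical usage, analogous to the other benchmarks in this package; the
# concrete configuration fields and backend object are not shown in this diff.
# config = GSTConfiguration(qubit_layouts=[[0, 1]], gate_set="2QXYCZ")
# benchmark = CompressiveGST(backend, config)
```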
iqm/benchmarks/compressive_gst/compressive_gst.py
CHANGED
@@ -34,7 +34,7 @@ from qiskit.circuit.library import CZGate, RGate
 import xarray as xr

 from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
-from iqm.benchmarks.benchmark_definition import Benchmark, add_counts_to_dataset
+from iqm.benchmarks.benchmark_definition import Benchmark, BenchmarkObservationIdentifier, add_counts_to_dataset
 from iqm.benchmarks.circuit_containers import BenchmarkCircuit, CircuitGroup, Circuits
 from iqm.benchmarks.compressive_gst.gst_analysis import mgst_analysis
 from iqm.benchmarks.logging_config import qcvv_logger
@@ -72,7 +72,9 @@ class CompressiveGST(Benchmark):
 self.pdim = 2**self.num_qubits
 self.num_povm = self.pdim

-self.gate_set, self.gate_labels, self.num_gates = parse_gate_set(
+self.gate_set, self.gate_labels, self.num_gates = parse_gate_set(
+configuration, self.num_qubits, self.qubit_layouts
+)

 if configuration.opt_method not in ["GD", "SFN", "auto"]:
 raise ValueError("Invalid optimization method, valid options are: GD, SFN, auto")
@@ -96,10 +98,8 @@ class CompressiveGST(Benchmark):
 else:
 self.batch_size = configuration.batch_size

-
-
-np.empty((self.configuration.num_circuits, self.num_povm)),
-) # format used by mGST
+# Circuit format used by mGST
+self.J = np.empty((self.configuration.num_circuits, self.num_povm))
 self.bootstrap_results = List[Tuple[np.ndarray]] # List of GST outcomes from bootstrapping

 @staticmethod
@@ -157,10 +157,8 @@ class CompressiveGST(Benchmark):
 drop_final_rz=False,
 )
 # Saving raw and transpiled circuits in a consistent format with other benchmarks
-self.transpiled_circuits.circuit_groups.append(CircuitGroup(name=str(qubits), circuits=
-self.untranspiled_circuits.circuit_groups.append(
-CircuitGroup(name=str(qubits), circuits=transpiled_qc_list)
-)
+self.transpiled_circuits.circuit_groups.append(CircuitGroup(name=str(qubits), circuits=transpiled_qc_list))
+self.untranspiled_circuits.circuit_groups.append(CircuitGroup(name=str(qubits), circuits=raw_qc_list))

 def add_configuration_to_dataset(self, dataset): # CHECK
 """
@@ -287,6 +285,7 @@ class GSTConfiguration(BenchmarkConfigurationBase):
 convergence_criteria: Union[str, List[float]] = [4, 1e-4]
 batch_size: Union[str, int] = "auto"
 bootstrap_samples: int = 0
+testing: bool = False


 def parse_layouts(qubit_layouts: Union[List[int], List[List[int]]]) -> List[List[int]]:
@@ -297,7 +296,7 @@ def parse_layouts(qubit_layouts: Union[List[int], List[List[int]]]) -> List[List
 The qubit_layouts on the backend where the gates are defined on

 Returns:
-qubit_layouts: List[List[
+qubit_layouts: List[List[int]]
 A properly typed qubit_layout if no Error was raised
 """
 if all(isinstance(qubits, int) for qubits in qubit_layouts):
@@ -316,7 +315,7 @@ def parse_layouts(qubit_layouts: Union[List[int], List[List[int]]]) -> List[List


 def parse_gate_set(
-configuration: GSTConfiguration, num_qubits
+configuration: GSTConfiguration, num_qubits: int, qubit_layouts: List[List[int]]
 ) -> Tuple[List[QuantumCircuit], Dict[str, Dict[int, str]], int]:
 """
 Handles different gate set inputs and produces a valid gate set
@@ -326,12 +325,14 @@ def parse_gate_set(
 Configuration class containing variables
 num_qubits: int
 The number of qubits on which the gate set is defined
+qubit_layouts: List[List[int]]
+A List of physical qubit layouts, as specified by integer labels, where the benchmark is meant to be run.

 Returns:
 gate_set: List[QuantumCircuit]
 A list of gates defined as quantum circuit objects
-
-
+gate_labels_dict: Dict[str, Dict[int, str]]
+The names of gates, i.e. "Rx(pi/2)" for a pi/2 rotation around the x-axis.
 num_gates: int
 The number of gates in the gate set

@@ -347,23 +348,27 @@ def parse_gate_set(
 "1QXYI, 2QXYCZ, 2QXYCZ_extended, 3QXYCZ."
 )
 if configuration.gate_set in ["1QXYI", "2QXYCZ", "2QXYCZ_extended", "3QXYCZ"]:
-gate_set,
-
+gate_set, gate_label_dict, num_gates = create_predefined_gate_set(
+configuration.gate_set, num_qubits, qubit_layouts
+)
+return gate_set, gate_label_dict, num_gates

 if isinstance(configuration.gate_set, list):
+gate_label_dict = {}
 gate_set = configuration.gate_set
 num_gates = len(gate_set)
 if configuration.gate_labels is None:
 gate_labels = {i: f"Gate %i" % i for i in range(num_gates)}
 else:
-if configuration.gate_labels:
-
-
-
-
-
-
-
+if len(configuration.gate_labels) != num_gates:
+raise ValueError(
+f"The number of gate labels (%i) does not match the number of gates (%i)"
+% (len(configuration.gate_labels), num_gates)
+)
+gate_labels = dict(enumerate(configuration.gate_labels))
+for qubit_layout in qubit_layouts:
+gate_label_dict.update({BenchmarkObservationIdentifier(qubit_layout).string_identifier: gate_labels})
+return gate_set, gate_label_dict, num_gates

 raise ValueError(
 f"Invalid gate set, choose among 1QXYI, 2QXYCZ, 2QXYCZ_extended,"
@@ -371,7 +376,9 @@ def parse_gate_set(
 )


-def create_predefined_gate_set(
+def create_predefined_gate_set(
+gate_set: Union[str, List[Any]], num_qubits: int, qubit_layouts: List[List[int]]
+) -> Tuple[List[QuantumCircuit], Dict[str, Dict[int, str]], int]:
 """Create a list of quantum circuits corresponding to a predefined gate set.

 The circuits are assigned to the specified qubit_layouts on the backend only during transipilation, so the qubit labels
@@ -383,7 +390,7 @@ def create_predefined_gate_set(gate_set, num_qubits) -> Tuple[List[QuantumCircui
 Returns:
 gates: List[QuantumCircuit]
 The gate set as a list of circuits
-gate_labels_dict: Dict[int, str]
+gate_labels_dict: Dict[str, Dict[int, str]]
 The names of gates, i.e. "Rx(pi/2)" for a pi/2 rotation around the x-axis.
 num_gates: int
 The number of gates in the gate set
@@ -428,10 +435,10 @@ def create_predefined_gate_set(gate_set, num_qubits) -> Tuple[List[QuantumCircui
 "Rx(pi/2)",
 "Ry(pi/2)",
 "Ry(pi/2)",
-"Rx(pi/2)
-"Rx(pi/2)
-"Ry(pi/2)
-"Ry(pi/2)
+"Rx(pi/2)-Rx(pi/2)",
+"Rx(pi/2)-Ry(pi/2)",
+"Ry(pi/2)-Rx(pi/2)",
+"Ry(pi/2)-Ry(pi/2)",
 "CZ",
 ]
 elif gate_set == "3QXYCZ":
@@ -459,6 +466,12 @@ def create_predefined_gate_set(gate_set, num_qubits) -> Tuple[List[QuantumCircui
 gates = add_idle_gates(gates, unmapped_qubits, gate_qubits)
 gates = [remove_idle_wires(qc) for qc in gates]

-gate_label_dict =
-
+gate_label_dict = {}
+for qubit_layout in qubit_layouts:
+layout_label_dict = dict(enumerate(gate_labels))
+iqm_qubits = [f"QB{q + 1}" for q in qubit_layout]
+gate_qubits_iqm = [(iqm_qubits[q] for q in qubits) for qubits in gate_qubits]
+for key, value in layout_label_dict.items():
+layout_label_dict[key] = value + ":" + "-".join(gate_qubits_iqm[key])
+gate_label_dict.update({BenchmarkObservationIdentifier(qubit_layout).string_identifier: layout_label_dict})
 return gates, gate_label_dict, len(gates)
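For orientation, the net effect of the changes above is that gate labels are no longer a flat index-to-name mapping but are keyed per qubit layout, with physical IQM qubit names appended to each label. A hedged sketch of the resulting structure — the identifier string format and the concrete labels below are illustrative assumptions:

```python
# Illustrative shape of the per-layout mapping built by create_predefined_gate_set:
# outer key = layout string identifier, inner dict = gate index -> annotated label.
gate_label_dict = {
    "[0, 1]": {              # assumed identifier for qubit_layout = [0, 1]
        0: "Rx(pi/2):QB1",   # single-qubit gate acting on physical qubit QB1
        1: "Ry(pi/2):QB1",
        2: "CZ:QB1-QB2",     # two-qubit gate acting on QB1 and QB2
    },
}

# Downstream code then looks labels up for one layout at a time:
labels_for_layout = gate_label_dict["[0, 1]"]
print(labels_for_layout[2])  # -> "CZ:QB1-QB2"
```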
iqm/benchmarks/compressive_gst/gst_analysis.py
CHANGED
@@ -85,6 +85,7 @@ def bootstrap_errors(
 E: ndarray,
 rho: ndarray,
 target_mdl: Model,
+identifier: str,
 parametric: bool = False,
 ) -> tuple[Any, Any, Any, Any, Any]:
 """Resamples circuit outcomes a number of times and computes GST estimates for each repetition
@@ -113,6 +114,8 @@ def bootstrap_errors(
 Current initial state estimate
 target_mdl : pygsti model object
 The target gate set
+identifier : str
+The string identifier of the current benchmark
 parametric : bool
 If set to True, parametric bootstrapping is used, else non-parametric bootstrapping. Default: False

@@ -175,7 +178,7 @@ def bootstrap_errors(
 dataset.attrs["J"],
 y_sampled,
 target_mdl,
-dataset.attrs["gate_labels"],
+dataset.attrs["gate_labels"][identifier],
 )
 df_g_list.append(df_g.values)
 df_o_list.append(df_o.values)
@@ -291,7 +294,7 @@ def generate_unit_rank_gate_results(
 reporting.number_to_str(
 df_g.values[i, 0], [percentiles_g_high[i, 0], percentiles_g_low[i, 0]], precision=5
 )
-for i in range(len(dataset.attrs["gate_labels"]))
+for i in range(len(dataset.attrs["gate_labels"][identifier]))
 ],
 r"Diamond distance": [
 reporting.number_to_str(
@@ -330,7 +333,7 @@ def generate_unit_rank_gate_results(
 )

 df_g_rotation.columns = [f"h_%s" % label for label in pauli_labels]
-df_g_rotation.rename(index=dataset.attrs["gate_labels"], inplace=True)
+df_g_rotation.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)

 else:
 df_g_final = DataFrame(
@@ -358,10 +361,11 @@ def generate_unit_rank_gate_results(
 ).T
 )
 df_g_rotation.columns = [f"h_%s" % label for label in pauli_labels]
-
+df_g_rotation.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)
+df_g_final.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)

-fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"])
-fig_rotation = dataframe_to_figure(df_g_rotation, dataset.attrs["gate_labels"])
+fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"][identifier])
+fig_rotation = dataframe_to_figure(df_g_rotation, dataset.attrs["gate_labels"][identifier])
 return df_g_final, df_g_rotation, fig_g, fig_rotation


@@ -403,7 +407,7 @@ def generate_gate_results(
 identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
 n_evals = np.min([max_evals, dataset.attrs["pdim"] ** 2])
 X_opt_pp, _, _ = compatibility.std2pp(X_opt, E_opt, rho_opt)
-df_g_evals = reporting.generate_Choi_EV_table(X_opt, n_evals, dataset.attrs["gate_labels"])
+df_g_evals = reporting.generate_Choi_EV_table(X_opt, n_evals, dataset.attrs["gate_labels"][identifier])

 if dataset.attrs["bootstrap_samples"] > 0:
 X_array, E_array, rho_array, df_g_array, _ = dataset.attrs["results_layout_" + identifier]["bootstrap_data"]
@@ -419,7 +423,7 @@ def generate_gate_results(
 ]
 bootstrap_evals = np.array(
 [
-reporting.generate_Choi_EV_table(X_array_std[i], n_evals, dataset.attrs["gate_labels"])
+reporting.generate_Choi_EV_table(X_array_std[i], n_evals, dataset.attrs["gate_labels"][identifier])
 for i in range(dataset.attrs["bootstrap_samples"])
 ]
 )
@@ -466,15 +470,15 @@ def generate_gate_results(
 {
 "Avg. gate fidelity": [
 reporting.number_to_str(df_g.values[i, 0].copy(), precision=5)
-for i in range(len(dataset.attrs["gate_labels"]))
+for i in range(len(dataset.attrs["gate_labels"][identifier]))
 ],
 "Diamond distance": [
 reporting.number_to_str(df_g.values[i, 1].copy(), precision=5)
-for i in range(len(dataset.attrs["gate_labels"]))
+for i in range(len(dataset.attrs["gate_labels"][identifier]))
 ],
 "Unitarity": [
 reporting.number_to_str(reporting.unitarities(X_opt_pp)[i], precision=5)
-for i in range(len(dataset.attrs["gate_labels"]))
+for i in range(len(dataset.attrs["gate_labels"][identifier]))
 ],
 # "Entanglemen fidelity to depol. channel": [reporting.number_to_str(reporting.eff_depol_params(X_opt_pp)[i], precision=5)
 # for i in range(len(gate_labels))],
@@ -490,10 +494,10 @@ def generate_gate_results(
 ]

 df_g_evals_final = DataFrame(eval_strs).T
-df_g_evals_final.rename(index=dataset.attrs["gate_labels"], inplace=True)
+df_g_evals_final.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)

-fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"])
-fig_choi = dataframe_to_figure(df_g_evals_final, dataset.attrs["gate_labels"])
+fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"][identifier])
+fig_choi = dataframe_to_figure(df_g_evals_final, dataset.attrs["gate_labels"][identifier])
 return df_g_final, df_g_evals_final, fig_g, fig_choi


@@ -570,7 +574,7 @@ def pandas_results_to_observations(
 """
 observation_list: list[BenchmarkObservation] = []
 err = dataset.attrs["bootstrap_samples"] > 0
-for idx, gate_label in enumerate(dataset.attrs["gate_labels"].values()):
+for idx, gate_label in enumerate(dataset.attrs["gate_labels"][identifier.string_identifier].values()):
 observation_list.extend(
 [
 BenchmarkObservation(
@@ -639,7 +643,7 @@ def dataset_counts_to_mgst_format(dataset: xr.Dataset, qubit_layout: List[int])
 return y


-def
+def run_mGST_wrapper(
 dataset: xr.Dataset, y: ndarray
 ) -> tuple[ndarray, ndarray, ndarray, ndarray, ndarray, ndarray, ndarray, ndarray]:
 """Wrapper function for mGST algorithm execution which prepares an initialization and sets the alg. parameters
@@ -718,7 +722,7 @@ def run_mGST(
 threshold_multiplier=dataset.attrs["convergence_criteria"][0],
 target_rel_prec=dataset.attrs["convergence_criteria"][1],
 init=init_params,
-testing=
+testing=dataset.attrs["testing"],
 )

 return K, X, E, rho, K_target, X_target, E_target, rho_target
@@ -745,7 +749,7 @@ def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:

 # Main GST reconstruction
 start_timer = perf_counter()
-K, X, E, rho, K_target, X_target, E_target, rho_target =
+K, X, E, rho, K_target, X_target, E_target, rho_target = run_mGST_wrapper(dataset, y)
 main_gst_time = perf_counter() - start_timer

 # Gauge optimization
@@ -756,7 +760,7 @@ def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:

 # Quick report
 df_g, _ = reporting.quick_report(
-X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"]
+X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
 )

 # Gate set in the Pauli basis
@@ -779,11 +783,11 @@ def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:

 ### Bootstrap
 if dataset.attrs["bootstrap_samples"] > 0:
-bootstrap_results = bootstrap_errors(dataset, y, K, X, E, rho, target_mdl)
+bootstrap_results = bootstrap_errors(dataset, y, K, X, E, rho, target_mdl, identifier)
 dataset.attrs["results_layout_" + identifier].update({"bootstrap_data": bootstrap_results})

 _, df_o_full = reporting.report(
-X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"]
+X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
 )
 df_o_final, fig_o = generate_non_gate_results(dataset, qubit_layout, df_o_full)

@@ -820,7 +824,7 @@ def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
 X_opt_pp,
 X_target_pp,
 basis_labels=pauli_labels,
-gate_labels=dataset.attrs["gate_labels"],
+gate_labels=dataset.attrs["gate_labels"][identifier],
 return_fig=True,
 )
 for i, figure in enumerate(figures):
iqm/benchmarks/entanglement/ghz.py
CHANGED
@@ -296,7 +296,7 @@ def fidelity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
 value=fidelity[1],
 )
 )
-plots = {"All layout fidelities": plot_fidelities(observation_list, qubit_layouts)}
+plots = {"All layout fidelities": plot_fidelities(observation_list, dataset, qubit_layouts)}
 return BenchmarkAnalysisResult(dataset=dataset, observations=observation_list, plots=plots)


@@ -541,13 +541,17 @@ def get_cx_map(qubit_layout: List[int], graph: networkx.Graph) -> list[list[int]
 return cx_map


-def plot_fidelities(
+def plot_fidelities(
+observations: List[BenchmarkObservation], dataset: xr.Dataset, qubit_layouts: List[List[int]]
+) -> Figure:
 """Plots all the fidelities stored in the observations into a single plot of fidelity vs. number of qubits

 Parameters
 ----------
 observations: List[BenchmarkObservation]
 A list of Observations, each assumed to be a fidelity
+dataset: xr.Dataset
+The experiment dataset containing results and metadata
 qubit_layouts
 The list of qubit layouts as given by the user. This is used to name the layouts in order for identification
 in the plot.
@@ -556,35 +560,51 @@ def plot_fidelities(observations: List[BenchmarkObservation], qubit_layouts: Lis
 fig :Figure
 The figure object with the fidelity plot.
 """
+timestamp = dataset.attrs["execution_timestamp"]
+backend_name = dataset.attrs["backend_name"]
+
 fig, ax = plt.subplots()
 layout_short = {str(qubit_layout): f" L{i}" for i, qubit_layout in enumerate(qubit_layouts)}
 recorded_labels = []
+x_positions = []
+cmap = plt.cm.get_cmap("winter")
+# colors = [cmap(0), cmap(1)]
 for i, obs in enumerate(observations):
 label = "With REM" if "rem" in obs.name else "Unmitigated"
 if label in recorded_labels:
 label = "_nolegend_"
 else:
 recorded_labels.append(label)
-
+identifier = obs.identifier.string_identifier
+x = len(
+identifier.strip("[]").replace('"', "").replace(" ", "").split(",")
+) # pylint: disable=inconsistent-quotes
 y = obs.value
 ax.errorbar(
 x,
 y,
 yerr=obs.uncertainty,
 capsize=4,
-color=
+color=cmap(0.85) if "rem" in obs.name else cmap(0.15),
 label=label,
 fmt="o",
 alpha=1,
+mec="black",
 markersize=5,
 )
-
-
-
-ax.
+x_positions.append(x)
+ax.annotate(layout_short[identifier], (x, y))
+
+ax.set_xticks(x_positions, labels=[str(x) for x in x_positions])
+ax.grid()
+
+ax.axhline(0.5, linestyle="--", color="red", label="GME threshold")
+ax.set_ylim((0, 1))
+ax.set_title(f"GHZ fidelities of all qubit layouts\nbackend: {backend_name} --- {timestamp}")
 ax.set_xlabel("Number of qubits")
 ax.set_ylabel("Fidelity")
-ax.legend(framealpha=0.5)
+ax.legend(framealpha=0.5, fontsize=8)
+plt.gcf().set_dpi(250)
 plt.close()
 return fig

@@ -625,6 +645,7 @@ class GHZBenchmark(Benchmark):
 self.mit_shots = configuration.mit_shots
 self.cal_url = configuration.cal_url
 self.timestamp = strftime("%Y%m%d-%H%M%S")
+self.execution_timestamp = ""

 def generate_native_ghz(self, qubit_layout: List[int], qubit_count: int, routine: str) -> CircuitGroup:
 """
@@ -793,12 +814,14 @@ class GHZBenchmark(Benchmark):
 else:
 dataset.attrs[key] = value
 dataset.attrs[f"backend_name"] = self.backend.name
+dataset.attrs[f"execution_timestamp"] = self.execution_timestamp
 dataset.attrs["fidelity_routine"] = self.fidelity_routine

 def execute(self, backend) -> xr.Dataset:
 """
 Executes the benchmark.
 """
+self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
 aux_custom_qubits_array = cast(List[List[int]], self.custom_qubits_array).copy()
 dataset = xr.Dataset()

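The reworked GHZ fidelity plot derives its x coordinate (number of qubits) by parsing each observation's layout identifier string. A small stand-alone sketch of that parsing step, with assumed identifier strings since this diff does not show how `string_identifier` is formatted:

```python
# Hypothetical identifier strings; in the benchmark they come from
# BenchmarkObservationIdentifier(...).string_identifier.
identifiers = ["[0, 1]", "[0, 1, 2, 3]"]

for identifier in identifiers:
    # Same recipe as in the diff: strip brackets, drop quotes and spaces,
    # split on commas, and count entries to get the layout's qubit number.
    num_qubits = len(identifier.strip("[]").replace('"', "").replace(" ", "").split(","))
    print(identifier, "->", num_qubits)  # "[0, 1]" -> 2, "[0, 1, 2, 3]" -> 4
```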
iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py
CHANGED
@@ -10,8 +10,8 @@ import warnings

 import numpy as np
 from qiskit import transpile
-from qiskit.quantum_info import random_clifford, random_pauli
-from qiskit_aer import Aer
+from qiskit.quantum_info import Clifford, random_clifford, random_pauli
+from qiskit_aer import Aer, AerSimulator
 from scipy.spatial.distance import hamming
 import xarray as xr

@@ -262,9 +262,13 @@ def generate_pauli_dressed_mrb_circuits(
 pauli_dressed_circuits_untranspiled: List[QuantumCircuit] = []
 pauli_dressed_circuits_transpiled: List[QuantumCircuit] = []

+sim_method = "stabilizer"
+simulator = AerSimulator(method=sim_method)
+
 for _ in range(pauli_samples_per_circ):
 # Initialize the quantum circuit object
 circ = QuantumCircuit(num_qubits)
+circ_untransp = QuantumCircuit(num_qubits)
 # Sample all the random Paulis
 paulis = [random_pauli(num_qubits) for _ in range(depth + 1)]

@@ -282,6 +286,7 @@ def generate_pauli_dressed_mrb_circuits(
 )
 circ.barrier()
 circ.compose(cycle_layers[k], inplace=True)
+circ_untransp.compose(cycle_layers[k], inplace=True)
 circ.barrier()

 # Apply middle Pauli
@@ -307,9 +312,6 @@ def generate_pauli_dressed_mrb_circuits(
 for i in range(num_qubits):
 circ.compose(clifford_layer[i].to_instruction().inverse(), qubits=[i], inplace=True)

-# Add measurements
-circ.measure_all()
-
 # Transpile to backend - no optimize SQG should be used!
 if isinstance(backend_arg, str):
 retrieved_backend = get_iqm_backend(backend_arg)
@@ -317,6 +319,13 @@ def generate_pauli_dressed_mrb_circuits(
 assert isinstance(backend_arg, IQMBackendBase)
 retrieved_backend = backend_arg

+circ_untransp = circ.copy()
+# Add measurements to untranspiled - after!
+circ_untranspiled = transpile(Clifford(circ_untransp).to_circuit(), simulator)
+circ_untranspiled.measure_all()
+
+# Add measurements to transpiled - before!
+circ.measure_all()
 circ_transpiled = transpile(
 circ,
 backend=retrieved_backend,
@@ -325,7 +334,7 @@ def generate_pauli_dressed_mrb_circuits(
 routing_method=routing_method,
 )

-pauli_dressed_circuits_untranspiled.append(
+pauli_dressed_circuits_untranspiled.append(circ_untranspiled)
 pauli_dressed_circuits_transpiled.append(circ_transpiled)

 # Store the circuit
@@ -418,14 +427,14 @@ def mrb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
 observations = {}
 dataset = run.dataset.copy(deep=True)

-shots = dataset.attrs["shots"]
+# shots = dataset.attrs["shots"]
 num_circuit_samples = dataset.attrs["num_circuit_samples"]
 num_pauli_samples = dataset.attrs["num_pauli_samples"]

 density_2q_gates = dataset.attrs["density_2q_gates"]
 two_qubit_gate_ensemble = dataset.attrs["two_qubit_gate_ensemble"]

-max_gates_per_batch = dataset.attrs["max_gates_per_batch"]
+# max_gates_per_batch = dataset.attrs["max_gates_per_batch"]

 # Analyze the results for each qubit layout of the experiment dataset
 qubits_array = dataset.attrs["qubits_array"]
@@ -445,8 +454,9 @@ def mrb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
 else:
 assigned_mrb_depths = {str(qubits_array[i]): [2 * m for m in depths_array[i]] for i in range(len(depths_array))}

-
-
+mrb_sim_circuits = run.circuits["untranspiled_circuits"]
+sim_method = "stabilizer"
+simulator = AerSimulator(method=sim_method) # Aer.get_backend("stabilizer")

 all_noisy_counts: Dict[str, Dict[int, List[Dict[str, int]]]] = {}
 all_noiseless_counts: Dict[str, Dict[int, List[Dict[str, int]]]] = {}
@@ -465,22 +475,12 @@ def mrb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
 all_noisy_counts[str(qubits)][depth] = xrvariable_to_counts(
 dataset, identifier, num_circuit_samples * num_pauli_samples
 )
-
 qcvv_logger.info(f"Depth {depth}")
-# Execute the quantum circuits on the simulated, ideal backend
-# pylint: disable=unbalanced-tuple-unpacking
-all_noiseless_jobs, _ = submit_execute(
-{tuple(qubits): transpiled_circuits[f"{str(qubits)}_depth_{str(depth)}"].circuits},
-simulator,
-shots,
-calset_id=None,
-max_gates_per_batch=max_gates_per_batch,
-)

-
-
-
-)
+mrb_circs = mrb_sim_circuits[f"{str(qubits)}_depth_{str(depth)}"].circuits
+
+qcvv_logger.info("Getting simulation counts")
+all_noiseless_counts[str(qubits)][depth] = simulator.run(mrb_circs).result().get_counts()

 # Compute polarizations for the current depth
 polarizations[depth] = compute_polarizations(
@@ -695,7 +695,7 @@ class MirrorRandomizedBenchmarking(Benchmark):
 mrb_untranspiled_circuits_lists: Dict[int, List[QuantumCircuit]] = {}
 time_circuit_generation[str(qubits)] = 0
 for depth in assigned_mrb_depths[str(qubits)]:
-qcvv_logger.info(f"Depth {depth}")
+qcvv_logger.info(f"Depth {depth} - Generating all circuits")
 mrb_circuits[depth], elapsed_time = generate_fixed_depth_mrb_circuits(
 qubits,
 self.num_circuit_samples,
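The mirror RB changes replace submitting the ideal reference circuits to a simulated backend with a direct local stabilizer simulation of the untranspiled Clifford circuits. A compact sketch of that simulation path using the same Qiskit/Aer calls the diff imports — the circuit here is only a placeholder for one untranspiled MRB circuit:

```python
from qiskit import QuantumCircuit, transpile
from qiskit.quantum_info import Clifford
from qiskit_aer import AerSimulator

# Placeholder Clifford circuit standing in for an untranspiled MRB circuit.
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)

simulator = AerSimulator(method="stabilizer")

# Same recipe as the diff: canonicalize via Clifford(...).to_circuit(),
# transpile for the stabilizer simulator, add measurements, and run locally.
sim_circ = transpile(Clifford(qc).to_circuit(), simulator)
sim_circ.measure_all()
counts = simulator.run(sim_circ, shots=1024).result().get_counts()
print(counts)  # ideal counts, roughly {'00': 512, '11': 512} for this circuit
```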
{iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: iqm-benchmarks
-Version: 2.
+Version: 2.6
 Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
 Author-email: IQM Finland Oy <developers@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
 Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -95,6 +95,19 @@ If you have already installed `iqm-benchmarks` and want to get the latest releas
 pip install iqm-benchmarks --upgrade
 ```

+## Optional dependencies
+
+Optional dependencies like compressive gate set tomography and jupyter notebooks can be installed as follows:
+```bash
+pip install "iqm-benchmarks[mgst, examples]"
+```
+Current optional dependencies are:
+* `examples`: Jupyter notebooks
+* `mgst`: Compressive gate set tomography
+* `develop`: Development tools
+* `test`: Code testing and Linting
+* `docs`: Documentation building
+
 ## Development mode _(latest changes: recommended)_

 To install in development mode with all required dependencies, you can instead clone the [repository](https://www.github.com/iqm-finland/iqm-benchmarks) and from the project directory run
{iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
-iqm/benchmarks/__init__.py,sha256=
+iqm/benchmarks/__init__.py,sha256=sFDsukGeEKJfTO3kaBli0bz8gSewemon35yHAx7bOQY,2279
 iqm/benchmarks/benchmark.py,sha256=SGhBcSxLPUu-cVXAjG4Db2TRobFCRBYoE1NtTDK1lJg,4432
 iqm/benchmarks/benchmark_definition.py,sha256=AZkvANrf0_0glbq_P_uo_YqbBU9IZa2gJlMVz6qT6VU,10500
 iqm/benchmarks/circuit_containers.py,sha256=anEtZEsodYqOX-34oZRmuKGeEpp_VfgG5045Mz4-4hI,7562
 iqm/benchmarks/logging_config.py,sha256=U7olP5Kr75AcLJqNODf9VBhJLVqIvA4AYR6J39D5rww,1052
 iqm/benchmarks/readout_mitigation.py,sha256=7FlbSH-RJTtQuRYLChwkQV_vBv0ZfMQTH519cAbyxQ4,12252
 iqm/benchmarks/utils.py,sha256=BNbPeNNiFfE72Y-coVjLK3O7y7-j1Ag-l_qyBQsxXRY,20278
-iqm/benchmarks/compressive_gst/__init__.py,sha256=
-iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=
-iqm/benchmarks/compressive_gst/gst_analysis.py,sha256=
+iqm/benchmarks/compressive_gst/__init__.py,sha256=LneifgYXtcwo2jcXo7GdUEHL6_peipukShhkrdaTRCA,929
+iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=LyhuKCq7UvmkcDhgzRm1QuK95eepJlerxqNXuj7llxc,22146
+iqm/benchmarks/compressive_gst/gst_analysis.py,sha256=wMsomKcD5bUhfzAsi9NGw5YMkkcZ-pOFYsZvuCyGHRM,35282
 iqm/benchmarks/entanglement/__init__.py,sha256=9T7prOwqMmFWdb4t6ETAHZXKK5o6FvU2DvVb6WhNi-U,682
-iqm/benchmarks/entanglement/ghz.py,sha256=
+iqm/benchmarks/entanglement/ghz.py,sha256=e97DMjH-uAuoO7cqoDS_6k7yDr-DjU9soWL2GyTgp8U,40257
 iqm/benchmarks/optimization/__init__.py,sha256=_ajW_OibYLCtzU5AUv5c2zuuVYn8ZNeZUcUUSIGt51M,747
 iqm/benchmarks/optimization/qscore.py,sha256=6I13YbFvFq1RcX4mYTw6S4ALb4Ix9t7gjlRzZbaTARM,34038
 iqm/benchmarks/quantum_volume/__init__.py,sha256=i-Q4SpDWELBw7frXnxm1j4wJRcxbIyrS5uEK_v06YHo,951
@@ -25,19 +25,19 @@ iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py,sha256=q8bHcrV
 iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py,sha256=sq6MgN_hwlpkOj10vyCU4e6eKSX-oLcF2L9na6W2Gt4,681
 iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=PlHn1J8VPaJF5csNH8jxcifz_MdisOEPU54kU-FYoLY,26920
 iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py,sha256=ZekEqI_89nXzGO1vjM-b5Uwwicy59M4fYHXfA-f0MIg,674
-iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=
+iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=AmPAp80wWc_6ao4OMZBUNGX0_WlSnz9utN4436ZcjJ0,33990
 mGST/LICENSE,sha256=TtHNq55cUcbglb7uhVudeBLUh_qPdUoAEvU0BBwFz-k,1098
 mGST/README.md,sha256=v_5kw253csHF4-RfE-44KqFmBXIsSMRmOtN0AUPrRxE,5050
 mGST/additional_fns.py,sha256=_SEJ10FRNM7_CroysT8hCLZTfpm6ZhEIDCY5zPTnhjo,31390
-mGST/algorithm.py,sha256=
+mGST/algorithm.py,sha256=ikedzOYC6M0FEaBsbtcWstl2FYQ9eW7i9P4sLw9pRps,26327
 mGST/compatibility.py,sha256=00DsPnNfOtrQcDTvxBDs-0aMhmuXmOIIxl_Ohy-Emkg,8920
-mGST/low_level_jit.py,sha256=
+mGST/low_level_jit.py,sha256=czEk_GV8rlDUD4a5dJOgoTv5_83QuXAEwmSJAMKelRw,26540
 mGST/optimization.py,sha256=YHwkzIkYvsZOPjclR-BCQWh24jeqjuXp0BB0WX5Lwow,10559
 mGST/qiskit_interface.py,sha256=L4H-4SdhP_bjSFFvpQoF1E7EyGbIJ_CI_y4a7_YEwmU,10102
 mGST/reporting/figure_gen.py,sha256=6Xd8vwfy09hLY1YbJY6TRevuMsQSU4MsWqemly3ZO0I,12970
-mGST/reporting/reporting.py,sha256
-iqm_benchmarks-2.
-iqm_benchmarks-2.
-iqm_benchmarks-2.
-iqm_benchmarks-2.
-iqm_benchmarks-2.
+mGST/reporting/reporting.py,sha256=We1cccz9BKbITYcSlZHdmBGdjMWAa1xNZe5tKP-yh_E,26004
+iqm_benchmarks-2.6.dist-info/LICENSE,sha256=2Ncb40-hqkTil78RPv3-YiJfKaJ8te9USJgliKqIdSY,11558
+iqm_benchmarks-2.6.dist-info/METADATA,sha256=sKrWhTqD0WfOnCXSxidvds1IR2e74v0Gxa_E6eUMxsk,9506
+iqm_benchmarks-2.6.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+iqm_benchmarks-2.6.dist-info/top_level.txt,sha256=3G23Z-1LGf-IOzTCUl6QwWqiQ3USz25Zt90Ihq192to,9
+iqm_benchmarks-2.6.dist-info/RECORD,,
mGST/algorithm.py
CHANGED
@@ -3,7 +3,6 @@ The main algorithm and functions that perform iteration steps
 """

 from decimal import Decimal
-import sys
 import time
 from warnings import warn

@@ -11,8 +10,10 @@ import numpy as np
 import numpy.linalg as la
 from scipy.linalg import eig, eigh
 from scipy.optimize import minimize
-from tqdm import
+from tqdm import trange
+from tqdm.contrib.logging import logging_redirect_tqdm

+from iqm.benchmarks.logging_config import qcvv_logger
 from mGST.additional_fns import batch, random_gs, transp
 from mGST.low_level_jit import ddA_derivs, ddB_derivs, ddM, dK, dK_dMdM, objf
 from mGST.optimization import (
@@ -683,7 +684,7 @@ def run_mGST(
 )

 success = False
-
+qcvv_logger.info(f"Starting mGST optimization...")

 if init:
 K, E = (init[0], init[1])
@@ -699,35 +700,38 @@ def run_mGST(
 res_list = [objf(X, E, rho, J, y)]

 for i in range(max_inits):
-
-
-
-
-
-
-
-
+with logging_redirect_tqdm(loggers=[qcvv_logger]):
+for _ in trange(max_iter):
+yb, Jb = batch(y, J, bsize)
+K, X, E, rho, A, B = optimize(yb, Jb, d, r, rK, n_povm, method, K, rho, A, B, fixed_elements)
+res_list.append(objf(X, E, rho, J, y))
+if res_list[-1] < delta:
+qcvv_logger.info(f"Batch optimization successful, improving estimate over full data....")
+success = True
+break
 if testing:
 plot_objf(res_list, delta, f"Objective function for batch optimization")
 if success:
 break
-
+qcvv_logger.info(f"Run ", i, f"failed, trying new initialization...")

 if not success and max_inits > 0:
-
-
-
-
-
-
+qcvv_logger.info(f"Success threshold not reached, attempting optimization over full data set...")
+with logging_redirect_tqdm(loggers=[qcvv_logger]):
+for _ in trange(final_iter):
+K, X, E, rho, A, B = optimize(y, J, d, r, rK, n_povm, method, K, rho, A, B, fixed_elements)
+res_list.append(objf(X, E, rho, J, y))
+if np.abs(res_list[-2] - res_list[-1]) < delta * target_rel_prec:
+break
 if testing:
 plot_objf(res_list, delta, f"Objective function over batches and full data")
 if success or (res_list[-1] < delta):
-
+qcvv_logger.info(f"Convergence criterion satisfied")
 else:
-
-
-
-
+qcvv_logger.info(
+f"Convergence criterion not satisfied,inspect results and consider increasing max_iter or using new initializations.",
+)
+qcvv_logger.info(
+f"Final objective {Decimal(res_list[-1]):.2e} in time {(time.time() - t0):.2f}s",
 )
 return K, X, E, rho, res_list
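The logging changes route progress through `qcvv_logger` inside `logging_redirect_tqdm`, so log records no longer break the `trange` progress bar. A minimal stand-alone sketch of that pattern, with a generic logger standing in for `qcvv_logger`:

```python
import logging

from tqdm import trange
from tqdm.contrib.logging import logging_redirect_tqdm

# Generic logger standing in for iqm.benchmarks.logging_config.qcvv_logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("qcvv")

# Inside this context, log records are emitted via tqdm.write(), so they appear
# above the progress bar instead of corrupting it.
with logging_redirect_tqdm(loggers=[logger]):
    for i in trange(5):
        logger.info("iteration %d finished", i)
```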
mGST/low_level_jit.py
CHANGED
@@ -242,7 +242,7 @@ def Mp_norm_lower(X_true, E_true, rho_true, X, E, rho, J, n_povm, p):

 @njit(cache=True)
 def dK(X, K, E, rho, J, y, d, r, rK):
-"""Compute the derivative of the
+"""Compute the derivative of the objective function with respect to the Kraus tensor K.

 This function calculates the derivative of the Kraus operator K, based on the
 input matrices X, E, and rho, as well as the isometry condition.
@@ -271,7 +271,7 @@ def dK(X, K, E, rho, J, y, d, r, rK):
 Returns
 -------
 numpy.ndarray
-The derivative
+The derivative objective function with respect to the Kraus tensor K,
 reshaped to (d, rK, pdim, pdim), and scaled by 2/m/n_povm.
 """
 K = K.reshape(d, rK, -1)
@@ -295,7 +295,8 @@ def dK(X, K, E, rho, J, y, d, r, rK):

 @njit(cache=True)
 def dK_dMdM(X, K, E, rho, J, y, d, r, rK):
-"""Compute the derivatives of
+"""Compute the derivatives of the objective function with respect to K and the
+product of derivatives of the measurement map with respect to K.

 This function calculates the derivatives of K, dM10, and dM11 based on the input matrices X,
 matrix K, POVM elements E, density matrix rho, and target values y.
@@ -355,7 +356,7 @@ def dK_dMdM(X, K, E, rho, J, y, d, r, rK):

 @njit(cache=True, parallel=False)
 def ddM(X, K, E, rho, J, y, d, r, rK):
-"""Compute the second derivative of the objective function with respect to
+"""Compute the second derivative of the objective function with respect to the Kraus tensor K.

 This function calculates the second derivative of the objective function for a given
 set of input parameters.
@@ -481,7 +482,7 @@ def ddM(X, K, E, rho, J, y, d, r, rK):

 @njit(parallel=True, cache=True)
 def dA(X, A, B, J, y, r, pdim, n_povm):
-"""Compute the derivative of
+"""Compute the derivative of to the objective function with respect to the POVM tensor A

 This function calculates the gradient of A for a given set of input parameters.

@@ -507,7 +508,7 @@ def dA(X, A, B, J, y, r, pdim, n_povm):
 Returns
 -------
 dA : ndarray
-Derivative of
+Derivative of the objective function with respect to A.
 """
 A = np.ascontiguousarray(A)
 B = np.ascontiguousarray(B)
@@ -529,9 +530,7 @@ def dA(X, A, B, J, y, r, pdim, n_povm):

 @njit(parallel=True, cache=True)
 def dB(X, A, B, J, y, pdim):
-"""Compute the derivative of
-
-This function calculates the gradient of B for a given set of input parameters.
+"""Compute the derivative of the objective function with respect to the state tensor B.

 Parameters
 ----------
@@ -551,7 +550,7 @@ def dB(X, A, B, J, y, pdim):
 Returns
 -------
 dB : ndarray
-Derivative of
+Derivative of the objective function with respect to the state tensor B.
 """
 A = np.ascontiguousarray(A)
 B = np.ascontiguousarray(B)
@@ -571,11 +570,7 @@ def dB(X, A, B, J, y, pdim):

 @njit(parallel=True, cache=True)
 def ddA_derivs(X, A, B, J, y, r, pdim, n_povm):
-"""Calculate
-
-This function computes the derivatives of the POVM element based on input matrices
-A, B, and X, as well as the isometry condition. The derivatives are only dependent
-on one POVM element, and different POVM elements are connected via the isometry condition.
+"""Calculate all nonzero terms of the second derivatives with respect to the POVM tensor A.

 Parameters
 ----------
@@ -600,9 +595,9 @@ def ddA_derivs(X, A, B, J, y, r, pdim, n_povm):
 -------
 tuple of numpy.ndarray
 A tuple containing the computed derivatives:
-- dA: The derivative
+- dA: The derivative w.r.t. A
 of shape (n_povm, pdim, pdim).
-- dMdM: The product of the derivatives dM and dM, of shape (n_povm, r, r).
+- dMdM: The product of the measurement map derivatives dM and dM, of shape (n_povm, r, r).
 - dMconjdM: The product of the conjugate of dM and dM, of shape (n_povm, r, r).
 - dconjdA: The product of the conjugate of dA, of shape (n_povm, r, r).
 """
@@ -613,7 +608,6 @@ def ddA_derivs(X, A, B, J, y, r, pdim, n_povm):
 E[k] = (A[k].T.conj() @ A[k]).reshape(-1)
 rho = (B @ B.T.conj()).reshape(-1)
 dA_ = np.zeros((n_povm, pdim, pdim)).astype(np.complex128)
-dM = np.zeros((pdim, pdim)).astype(np.complex128)
 dMdM = np.zeros((n_povm, r, r)).astype(np.complex128)
 dMconjdM = np.zeros((n_povm, r, r)).astype(np.complex128)
 dconjdA = np.zeros((n_povm, r, r)).astype(np.complex128)
@@ -633,10 +627,7 @@ def ddA_derivs(X, A, B, J, y, r, pdim, n_povm):

 @njit(parallel=True, cache=True)
 def ddB_derivs(X, A, B, J, y, r, pdim):
-"""Calculate
-
-This function computes the derivatives of the isometry matrix B based on input matrices A and X,
-as well as the isometry condition.
+"""Calculate all nonzero terms of the second derivative with respect to the state tensor B.

 Parameters
 ----------
@@ -659,11 +650,10 @@ def ddB_derivs(X, A, B, J, y, r, pdim):
 -------
 tuple of numpy.ndarray
 A tuple containing the computed derivatives:
-- dB: The derivative
-of shape (pdim, pdim).
+- dB: The derivative w.r.t. B, of shape (pdim, pdim).
 - dMdM: The product of the derivatives dM and dM, of shape (r, r).
 - dMconjdM: The product of the conjugate of dM and dM, of shape (r, r).
-- dconjdB: The
+- dconjdB: The mixed second derivative of by dB and dB*, of shape (r, r).
 """
 n_povm = A.shape[0]
 A = np.ascontiguousarray(A)
mGST/reporting/reporting.py
CHANGED
@@ -17,7 +17,7 @@ from pygsti.tools import change_basis
 from pygsti.tools.optools import compute_povm_map
 from qiskit.quantum_info import SuperOp
 from qiskit.quantum_info.operators.measures import diamond_norm
-from scipy.linalg import logm
+from scipy.linalg import logm, schur
 from scipy.optimize import linear_sum_assignment, minimize

 from mGST import additional_fns, algorithm, compatibility, low_level_jit
@@ -339,17 +339,20 @@ def compute_sparsest_Pauli_Hamiltonian(U_set):
 pdim = U_set.shape[1]
 pp_vecs = []

-for U in U_set:
-
+for num, U in enumerate(U_set):
+# Schur decomposition finds the unitary diagonalization of a unitary matrix, which is not always returned by np.linalg.eig
+T, evecs = schur(U)
+evals = np.diag(T)
 Pauli_norms = []
+log_evals = np.log(evals)
 for i in range(2**pdim):
 bits = low_level_jit.local_basis(i, 2, pdim)
-evals_new = 1j *
+evals_new = 1j * log_evals + 2 * np.pi * bits
 H_new = evecs @ np.diag(evals_new) @ evecs.T.conj()
 pp_vec = change_basis(H_new.reshape(-1), "std", "pp")
 Pauli_norms.append(np.linalg.norm(pp_vec, ord=1))
 opt_bits = low_level_jit.local_basis(np.argsort(Pauli_norms)[0], 2, pdim)
-evals_opt = 1j *
+evals_opt = 1j * log_evals + 2 * np.pi * opt_bits
 H_opt = evecs @ np.diag(evals_opt) @ evecs.T.conj()
 pp_vecs.append(change_basis(H_opt.reshape(-1), "std", "pp"))
 pauli_coeffs = np.array(pp_vecs) / np.sqrt(pdim) / np.pi * 2
@@ -401,7 +404,7 @@ def phase_opt(X, K_t):
 K_t = K_t.reshape(d, pdim, pdim)
 K_opt = np.zeros(K.shape).astype(complex)
 for i in range(d):
-angle_opt = minimize(phase_err,
+angle_opt = minimize(phase_err, 1, bounds=[[-np.pi, np.pi]], args=(K[i], K_t[i])).x
 K_opt[i] = K[i] * np.exp(1j * angle_opt)
 return K_opt

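The new comment in `compute_sparsest_Pauli_Hamiltonian` motivates the switch to `scipy.linalg.schur`: for a unitary (hence normal) matrix the complex Schur form is diagonal and the Schur vectors give a genuinely unitary diagonalization, which `np.linalg.eig` does not guarantee when eigenvalues are degenerate. A small stand-alone check of that property on an illustrative unitary:

```python
import numpy as np
from scipy.linalg import schur

# Illustrative unitary with a degenerate eigenvalue: the two-qubit SWAP
# (eigenvalue +1 occurs three times, -1 once).
U = np.array(
    [[1, 0, 0, 0],
     [0, 0, 1, 0],
     [0, 1, 0, 0],
     [0, 0, 0, 1]],
    dtype=complex,
)

# schur(..., output="complex") returns U = Z @ T @ Z^dagger with unitary Z;
# because U is normal, T is (numerically) diagonal, so Z is a proper unitary
# eigenbasis even inside the degenerate eigenspace.
T, Z = schur(U, output="complex")
evals = np.diag(T)

assert np.allclose(Z @ np.diag(evals) @ Z.conj().T, U)
assert np.allclose(Z.conj().T @ Z, np.eye(4))
```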
{iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/LICENSE
File without changes
{iqm_benchmarks-2.4.dist-info → iqm_benchmarks-2.6.dist-info}/top_level.txt
File without changes