iqm-benchmarks 1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of iqm-benchmarks has been flagged as potentially problematic; review the release details in the registry before depending on it.
- iqm/benchmarks/__init__.py +31 -0
- iqm/benchmarks/benchmark.py +109 -0
- iqm/benchmarks/benchmark_definition.py +264 -0
- iqm/benchmarks/benchmark_experiment.py +163 -0
- iqm/benchmarks/compressive_gst/__init__.py +20 -0
- iqm/benchmarks/compressive_gst/compressive_gst.py +1029 -0
- iqm/benchmarks/entanglement/__init__.py +18 -0
- iqm/benchmarks/entanglement/ghz.py +802 -0
- iqm/benchmarks/logging_config.py +29 -0
- iqm/benchmarks/optimization/__init__.py +18 -0
- iqm/benchmarks/optimization/qscore.py +719 -0
- iqm/benchmarks/quantum_volume/__init__.py +21 -0
- iqm/benchmarks/quantum_volume/clops.py +726 -0
- iqm/benchmarks/quantum_volume/quantum_volume.py +854 -0
- iqm/benchmarks/randomized_benchmarking/__init__.py +18 -0
- iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +386 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +555 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +810 -0
- iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +86 -0
- iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +892 -0
- iqm/benchmarks/readout_mitigation.py +290 -0
- iqm/benchmarks/utils.py +521 -0
- iqm_benchmarks-1.3.dist-info/LICENSE +205 -0
- iqm_benchmarks-1.3.dist-info/METADATA +190 -0
- iqm_benchmarks-1.3.dist-info/RECORD +42 -0
- iqm_benchmarks-1.3.dist-info/WHEEL +5 -0
- iqm_benchmarks-1.3.dist-info/top_level.txt +2 -0
- mGST/LICENSE +21 -0
- mGST/README.md +54 -0
- mGST/additional_fns.py +962 -0
- mGST/algorithm.py +733 -0
- mGST/compatibility.py +238 -0
- mGST/low_level_jit.py +694 -0
- mGST/optimization.py +349 -0
- mGST/qiskit_interface.py +282 -0
- mGST/reporting/figure_gen.py +334 -0
- mGST/reporting/reporting.py +710 -0
|
@@ -0,0 +1,854 @@
|
|
|
1
|
+
# Copyright 2024 IQM Benchmarks developers
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
"""
|
|
16
|
+
Quantum Volume benchmark
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from copy import deepcopy
|
|
20
|
+
from time import strftime
|
|
21
|
+
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type
|
|
22
|
+
|
|
23
|
+
from matplotlib.figure import Figure
|
|
24
|
+
import matplotlib.pyplot as plt
|
|
25
|
+
from mthree.classes import QuasiCollection
|
|
26
|
+
from mthree.utils import expval
|
|
27
|
+
import numpy as np
|
|
28
|
+
from qiskit import QuantumCircuit
|
|
29
|
+
from qiskit.circuit.library import QuantumVolume
|
|
30
|
+
from qiskit_aer import Aer
|
|
31
|
+
import xarray as xr
|
|
32
|
+
|
|
33
|
+
# import iqm.diqe.executors.dynamical_decoupling.dd_high_level as dd
|
|
34
|
+
# from iqm.diqe.executors.dynamical_decoupling.dynamical_decoupling_core import DDStrategy
|
|
35
|
+
# from iqm.diqe.mapomatic import evaluate_costs, get_calibration_fidelities, get_circuit, matching_layouts
|
|
36
|
+
from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
|
|
37
|
+
from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
|
|
38
|
+
from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
|
|
39
|
+
from iqm.benchmarks.logging_config import qcvv_logger
|
|
40
|
+
from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
|
|
41
|
+
from iqm.benchmarks.utils import ( # execute_with_dd,
|
|
42
|
+
count_native_gates,
|
|
43
|
+
perform_backend_transpilation,
|
|
44
|
+
retrieve_all_counts,
|
|
45
|
+
retrieve_all_job_metadata,
|
|
46
|
+
set_coupling_map,
|
|
47
|
+
sort_batches_by_final_layout,
|
|
48
|
+
submit_execute,
|
|
49
|
+
timeit,
|
|
50
|
+
xrvariable_to_counts,
|
|
51
|
+
)
|
|
52
|
+
from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def compute_heavy_output_probabilities(
    execution_results: List[Dict[str, int]],
    ideal_heavy_outputs: List[Dict[str, float]],
) -> List[float]:
    """Compute the heavy output probability (HOP) of every quantum circuit.

    Args:
        execution_results (List[Dict[str, int]]): counts from execution of all quantum circuits.
        ideal_heavy_outputs (List[Dict[str, float]]): list of ideal heavy output dictionaries.

    Returns:
        List[float]: the HOP of all quantum circuits.
    """
    # A circuit's HOP is the expectation value of its measured counts against
    # the projector onto its ideal heavy-output bitstrings.
    return [
        expval(counts, heavy_output)
        for counts, heavy_output in zip(execution_results, ideal_heavy_outputs)
    ]
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def cumulative_hop(hops: List[float]) -> List[float]:
    """
    Computes the cumulative average heavy output probabilities of a QV experiment.

    The i-th entry is the mean of the first i + 1 HOP values. A running
    (cumulative) sum is used so the whole list is produced in O(n), instead of
    the O(n^2) cost of recomputing each prefix mean from scratch.

    Args:
        hops (List[float]): The individual heavy output probabilities for each trial.
    Returns:
        List[float]: cumulative average heavy output probabilities for all trials.
    """
    # cumsum over an empty list yields an empty array, so no special case needed.
    prefix_lengths = np.arange(1, len(hops) + 1, dtype=float)
    return list(np.cumsum(hops, dtype=float) / prefix_lengths)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def cumulative_std(hops: List[float]) -> List[float]:
    """Computes the cumulative standard deviation heavy output probabilities of a QV experiment.

    For each trial i this is the binomial standard error
    sqrt(p * (1 - p) / (i + 1)), where p is the cumulative mean of the first
    i + 1 heavy output probabilities.

    Args:
        hops (List[float]): The individual heavy output probabilities for each trial.
    Returns:
        List[float]: cumulative standard deviation heavy output probabilities for all trials.
    """
    deviations: List[float] = []
    for trial in range(len(hops)):
        # Cumulative mean over the first (trial + 1) HOP values.
        prefix_mean = np.mean(hops[: trial + 1], dtype=float)
        deviations.append((prefix_mean * ((1 - prefix_mean) / (trial + 1))) ** 0.5)
    return deviations
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def get_ideal_heavy_outputs(
    qc_list: List[QuantumCircuit],
    sorted_qc_list_indices: Dict[Tuple[int, ...], List[int]],
) -> List[Dict[str, float]]:
    """Calculate the heavy output bitstrings of a list of quantum circuits.

    Args:
        qc_list (List[QuantumCircuit]): the list of quantum circuits.
        sorted_qc_list_indices (Dict[Tuple, List[int]]): dictionary of indices (integers) corresponding to those in the original (untranspiled) list of circuits, with keys being final physical qubit measurements
    Returns:
        List[Dict[str, float]]: the list of heavy output dictionaries of each of the quantum circuits.
    """
    # Deep-copy so remove_final_measurements below does not mutate the caller's circuits.
    simulable_circuits = deepcopy(qc_list)
    ideal_heavy_outputs: List[Dict[str, float]] = []
    ideal_simulator = Aer.get_backend("statevector_simulator")

    # Separate according to sorted indices
    circuit_batches = {
        k: [simulable_circuits[i] for i in sorted_qc_list_indices[k]] for k in sorted_qc_list_indices.keys()
    }

    # Iterate batches from largest to smallest. NOTE(review): the returned list
    # follows this batch ordering, not the order of qc_list; presumably the
    # executed counts are sorted the same way — confirm against
    # sort_batches_by_final_layout and the callers in qv_analysis.
    for k in sorted(
        circuit_batches.keys(),
        key=lambda x: len(circuit_batches[x]),
        reverse=True,
    ):
        for qc in circuit_batches[k]:
            # Statevector simulation requires circuits without final measurements.
            qc.remove_final_measurements()
            ideal_probabilities = ideal_simulator.run(qc).result().get_counts()
            heavy_projectors = heavy_projector(ideal_probabilities)
            # NOTE(review): heavy_projector is annotated to return a dict, so
            # the list branch below looks defensive/dead — confirm.
            if isinstance(heavy_projectors, list):
                ideal_heavy_outputs.extend(heavy_projectors)
            elif isinstance(heavy_projectors, dict):
                ideal_heavy_outputs.append(heavy_projectors)

    return ideal_heavy_outputs
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def get_rem_hops(
    all_rem_quasidistro: List[List[QuasiCollection]], ideal_heavy_outputs: List[Dict[str, float]]
) -> List[float]:
    """Computes readout-error-mitigated heavy output probabilities.

    Args:
        all_rem_quasidistro (List[List[QuasiCollection]]): The list of lists of quasiprobability distributions.
        ideal_heavy_outputs (List[Dict[str, float]]): A list of the noiseless heavy output probability dictionaries.

    Returns:
        List[float]: A list of readout-error-mitigated heavy output probabilities.
    """
    # Pair each mitigated quasidistribution with its circuit's ideal heavy
    # outputs and take the expectation value of the heavy-output projector.
    return [
        expval(quasidistribution, heavy_output)
        for quasidistribution, heavy_output in zip(all_rem_quasidistro, ideal_heavy_outputs)
    ]
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def heavy_projector(probabilities: Dict[str, float]) -> Dict[str, float]:
    """
    Project (select) the samples from a given probability distribution onto heavy outputs.

    Heavy outputs are the bitstrings whose probability is strictly greater
    than the median probability of the distribution.

    Args:
        probabilities (Dict[str, float]): A dictionary of bitstrings and associated probabilities.

    Returns:
        Dict[str, float]: The dictionary of heavy output bitstrings, all with weight 1.
    """
    threshold = np.median(list(probabilities.values()))
    heavy_outputs: Dict[str, float] = {}
    for bitstring, probability in probabilities.items():
        if probability > threshold:
            heavy_outputs[bitstring] = 1.0
    return heavy_outputs
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def is_successful(
    heavy_output_probabilities: List[float],
    num_sigmas: int = 2,
) -> bool:
    """Check whether a QV benchmark returned heavy output results over the threshold, therefore being successful.

    This condition checks that the average of HOP is above the 2/3 threshold within the number of sigmas given in
    the configuration.

    Args:
        heavy_output_probabilities (List[float]): the HOP of all quantum circuits.
        num_sigmas (int): the number of sigmas to check
    Returns:
        bool: whether the QV benchmark was successful.
    """
    n_trials = len(heavy_output_probabilities)
    mean_hop = np.mean(heavy_output_probabilities)
    # Binomial standard error of the mean HOP.
    sigma = (mean_hop * (1 - mean_hop) / n_trials) ** 0.5
    lower_bound = mean_hop - sigma * num_sigmas
    return bool(lower_bound > 2.0 / 3.0)
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def plot_hop_threshold(
    qubits: List[int],
    depth: int,
    qv_result: List[float],
    qv_results_type: str,
    num_sigmas: int,
    backend_name: str,
    timestamp: str,
    in_volumetric: bool = False,
    plot_rem: bool = False,
) -> Tuple[str, Figure]:
    """Generate the figure representing each HOP, the average and the threshold.

    Args:
        qubits (List[int]): the list of qubit labels.
        depth (int): the depth of the QV circuit.
        qv_result (List[float]): the list of HOP.
        qv_results_type (str): whether results come from vanilla or DD execution.
        num_sigmas (int): the number of sigmas to plot.
        backend_name (str): the name of the backend.
        timestamp (str): the execution timestamp.
        in_volumetric (bool): whether the QV benchmark is being executed in the context of a volumetric benchmark.
            Defaults to False.
        plot_rem (bool): whether the plot corresponds to REM corrected data.

    Returns:
        str: the name of the figure.
        Figure: the figure.
    """
    # Cumulative mean and binomial standard error of the HOPs, per trial.
    cumul_hop = cumulative_hop(qv_result)
    cumul_std = cumulative_std(qv_result)

    fig = plt.figure()
    ax = plt.axes()

    # The 2/3 HOP pass threshold of the QV protocol.
    plt.axhline(2.0 / 3.0, color="red", linestyle="dashed", label="Threshold")

    # One dot per QV circuit sample.
    plt.scatter(
        np.arange(len(qv_result)),
        qv_result,
        marker=".",
        s=6,
        alpha=0.7,
        label="Individual HOP",
    )

    # Upper/lower edges of the num_sigmas confidence band around the cumulative mean.
    y_up: List[float] = [cumul_hop[i] + num_sigmas * cumul_std[i] for i in range(len(cumul_hop))]

    y_down: List[float] = [cumul_hop[i] - num_sigmas * cumul_std[i] for i in range(len(cumul_hop))]

    plt.fill_between(
        np.arange(len(qv_result)),
        y_up,
        y_down,
        color="b",
        alpha=0.125,
        label=rf"Cumulative {num_sigmas}$\sigma$",
    )

    plt.plot(cumul_hop, color=(0.0, 1.0, 0.5, 1.0), linewidth=2, label="Cumulative HOP")

    # NOTE(review): if all HOPs are equal this sets a degenerate y-range
    # (min == max) — confirm intended.
    plt.ylim(min(qv_result), max(qv_result))
    ax.set_ylabel("Heavy Output Probability (HOP)")
    ax.set_xlabel("QV Circuit Samples (N)")
    plt.legend(loc="lower right")

    plt.margins(x=0, y=0)

    # Title and figure name depend on the calling context: volumetric scan vs
    # standalone run, and REM-corrected vs raw data.
    if in_volumetric:
        plt.title(
            f"Quantum Volume ({len(qubits)} qubits, {depth} depth)\nBackend: {backend_name} / {timestamp}",
            fontsize=9,
        )
        fig_name = f"{len(qubits)}_qubits_{depth}_depth"
    else:
        if plot_rem:
            plt.title(
                f"Quantum Volume ({qv_results_type}) with REM on {len(qubits)} qubits ({str(qubits)})\nBackend: {backend_name} / {timestamp}"
            )
            fig_name = f"{qv_results_type}_REM_{len(qubits)}_qubits_{str(qubits)}"
        else:
            plt.title(
                f"Quantum Volume ({qv_results_type}) on {len(qubits)} qubits ({str(qubits)})\nBackend: {backend_name} / {timestamp}"
            )
            fig_name = f"{qv_results_type}_{len(qubits)}_qubits_{str(qubits)}"

    plt.gcf().set_dpi(250)
    # Close so the figure is not shown implicitly; the Figure object is still
    # returned to the caller for saving/embedding.
    plt.close()

    return fig_name, fig
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
def qv_analysis(run: RunResult) -> AnalysisResult:
    """Analysis function for a Quantum Volume experiment.

    For each qubit layout: retrieves the executed counts, simulates the
    untranspiled circuits to obtain the ideal heavy outputs, computes the
    heavy output probabilities (HOP), the pass/fail verdict and the QV value,
    and produces a HOP-vs-threshold plot. If readout error mitigation (REM)
    was enabled, a second pass repeats the post-processing on the mitigated
    quasiprobability distributions.

    Args:
        run (RunResult): A Quantum Volume experiment run for which analysis result is created
    Returns:
        AnalysisResult corresponding to Quantum Volume
    """

    plots = {}
    observations = {}
    dataset = run.dataset
    backend_name = dataset.attrs["backend_name"]
    execution_timestamp = dataset.attrs["execution_timestamp"]
    num_circuits = dataset.attrs["num_circuits"]
    num_sigmas = dataset.attrs["num_sigmas"]

    physical_layout = dataset.attrs["physical_layout"]

    # Analyze the results for each qubit layout of the experiment dataset
    qubit_layouts = dataset.attrs["custom_qubits_array"]
    depth = {}
    qv_results_type = {}
    execution_results = {}
    ideal_heavy_outputs = {}
    rem = dataset.attrs["rem"]

    for qubits_idx, qubits in enumerate(qubit_layouts):
        qcvv_logger.info(f"Noiseless simulation and post-processing for layout {qubits}")
        # Retrieve counts
        execution_results[str(qubits)] = xrvariable_to_counts(dataset, str(qubits), num_circuits)

        # Retrieve other dataset values
        dataset_dictionary = dataset.attrs[qubits_idx]
        sorted_qc_list_indices = dataset_dictionary["sorted_qc_list_indices"]
        transpiled_circ_dataset = dataset.attrs["transpiled_circuits"][str(qubits)]
        transpiled_qc_list = []
        untranspiled_circ_dataset = dataset.attrs["untranspiled_circuits"][str(qubits)]
        qc_list = []
        for key in transpiled_circ_dataset:  # Keys (final layouts) are the same for transp/untransp
            transpiled_qc_list.extend(transpiled_circ_dataset[key])
            qc_list.extend(untranspiled_circ_dataset[key])

        qv_results_type[str(qubits)] = dataset_dictionary["qv_results_type"]
        depth[str(qubits)] = len(qubits)

        # Simulate the circuits and get the ideal heavy outputs
        ideal_heavy_outputs[str(qubits)] = get_ideal_heavy_outputs(qc_list, sorted_qc_list_indices)

        # Compute the HO probabilities
        qv_result = compute_heavy_output_probabilities(execution_results[str(qubits)], ideal_heavy_outputs[str(qubits)])

        processed_results = {
            "average_heavy_output_probability": {
                "value": cumulative_hop(qv_result)[-1],
                "uncertainty": cumulative_std(qv_result)[-1],
            },
            "is_successful": {"value": str(is_successful(qv_result, num_sigmas)), "uncertainty": np.NaN},
            "QV_result": {
                "value": 2 ** len(qubits) if is_successful(qv_result, num_sigmas) else 1,
                "uncertainty": np.NaN,
            },
        }

        # Keep the sorted indices only when still needed by later REM or
        # batching post-processing.
        dataset.attrs[qubits_idx].update(
            {
                "sorted_qc_list_indices": (sorted_qc_list_indices if rem or physical_layout == "batching" else None),
                "cumulative_average_heavy_output_probability": cumulative_hop(qv_result),
                "cumulative_stddev_heavy_output_probability": cumulative_std(qv_result),
                "heavy_output_probabilities": qv_result,
            }
        )

        # UPDATE OBSERVATIONS
        observations.update({qubits_idx: processed_results})

        fig_name, fig = plot_hop_threshold(
            qubits,
            depth[str(qubits)],
            qv_result,
            qv_results_type[str(qubits)],
            num_sigmas,
            backend_name,
            execution_timestamp,
            plot_rem=False,
        )
        plots[fig_name] = fig

    if not rem:
        return AnalysisResult(dataset=dataset, plots=plots, observations=observations)

    # When REM is set to True, do the post-processing with the adjusted quasi-probabilities
    mit_shots = dataset.attrs["mit_shots"]
    rem_quasidistros = dataset.attrs["REM_quasidistributions"]
    for qubits_idx, qubits in enumerate(qubit_layouts):
        qcvv_logger.info(f"REM post-processing for layout {qubits}")
        # Retrieve
        dataset_dictionary = dataset.attrs[qubits_idx]

        qcvv_logger.info(f"Applying REM with {mit_shots} shots")
        sorted_qc_list_indices = dataset_dictionary["sorted_qc_list_indices"]

        qv_result_rem = get_rem_hops(
            rem_quasidistros[f"REM_quasidist_{str(qubits)}"],
            ideal_heavy_outputs[str(qubits)],
        )

        rem_results = {
            "REM_average_heavy_output_probability": {
                "value": cumulative_hop(qv_result_rem)[-1],
                "uncertainty": cumulative_std(qv_result_rem)[-1],
            },
            # Bug fix: previously used the default num_sigmas=2 here, while
            # REM_QV_result below used the configured num_sigmas, so the two
            # fields could disagree.
            "REM_is_successful": {"value": str(is_successful(qv_result_rem, num_sigmas)), "uncertainty": np.NaN},
            "REM_QV_result": {
                "value": 2 ** len(qubits) if is_successful(qv_result_rem, num_sigmas) else 1,
                "uncertainty": np.NaN,
            },
        }

        dataset.attrs[qubits_idx].update(
            {
                "sorted_qc_list_indices": (sorted_qc_list_indices if physical_layout == "batching" else None),
                "REM_cumulative_average_heavy_output_probability": cumulative_hop(qv_result_rem),
                "REM_cumulative_stddev_heavy_output_probability": cumulative_std(qv_result_rem),
                "REM_heavy_output_probabilities": qv_result_rem,
            }
        )

        # UPDATE OBSERVATIONS
        # NOTE(review): this replaces the non-REM observations stored for the
        # same qubits_idx above — confirm whether both sets should be kept.
        observations.update({qubits_idx: rem_results})

        fig_name_rem, fig_rem = plot_hop_threshold(
            qubits,
            depth[str(qubits)],
            qv_result_rem,
            qv_results_type[str(qubits)],
            num_sigmas,
            backend_name,
            execution_timestamp,
            plot_rem=True,
        )
        plots[fig_name_rem] = fig_rem

    return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
class QuantumVolumeBenchmark(Benchmark):
    """
    Quantum Volume reflects the deepest circuit a given number of qubits can execute with meaningful results.
    """

    # Analysis entry point invoked by the benchmark framework; wrapped in
    # staticmethod so the module-level qv_analysis is not bound as an
    # instance method.
    analysis_function = staticmethod(qv_analysis)

    # Benchmark identifier used by the framework.
    name: str = "quantum_volume"
|
|
438
|
+
|
|
439
|
+
def __init__(self, backend_arg: IQMBackendBase | str, configuration: "QuantumVolumeConfiguration"):
    """Construct the QuantumVolumeBenchmark class.

    Args:
        backend_arg (IQMBackendBase | str): the backend (or backend name) to execute the benchmark on
        configuration (QuantumVolumeConfiguration): the configuration of the benchmark

    Raises:
        ValueError: if `choose_qubits_routine` is "custom" but no `custom_qubits_array`
            was provided, or if `choose_qubits_routine` has an unsupported value.
    """
    super().__init__(backend_arg, configuration)

    # Preserve the configured name even when a ready backend object was passed.
    self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name

    self.num_circuits = configuration.num_circuits
    self.num_sigmas = configuration.num_sigmas
    self.choose_qubits_routine = configuration.choose_qubits_routine

    self.qiskit_optim_level = configuration.qiskit_optim_level
    self.optimize_sqg = configuration.optimize_sqg

    self.rem = configuration.rem
    self.mit_shots = configuration.mit_shots

    self.session_timestamp = strftime("%Y%m%d-%H%M%S")
    self.execution_timestamp = ""

    # The only difference between "custom" and "mapomatic" choose_qubits_routine is
    # that the custom qubit layout array is generated either by user-input or by mapomatic
    if self.choose_qubits_routine == "custom":
        if configuration.custom_qubits_array is None:
            raise ValueError(
                "If the `choose_qubits_custom` routine is chosen, a `custom_qubits_array` must be specified in `QuantumVolumeConfiguration`."
            )
        self.custom_qubits_array = configuration.custom_qubits_array
    # elif self.choose_qubits_routine == "mapomatic":
    #     self.custom_qubits_array = self.get_mapomatic_average_qv_scores()
    else:
        # Bug fix: the previous message blamed `custom_qubits_array`, but the
        # value being validated here is `choose_qubits_routine`.
        raise ValueError("The `choose_qubits_routine` variable must be either 'custom' or 'mapomatic'")
|
|
482
|
+
|
|
483
|
+
def add_all_meta_to_dataset(self, dataset: xr.Dataset):
    """Adds all configuration metadata and circuits to the dataset variable

    Args:
        dataset (xr.Dataset): The xarray dataset
    """
    attrs = dataset.attrs
    attrs["session_timestamp"] = self.session_timestamp
    attrs["execution_timestamp"] = self.execution_timestamp
    attrs["backend_configuration_name"] = self.backend_configuration_name
    attrs["backend_name"] = self.backend.name

    for field_name, field_value in self.configuration:
        # Store the benchmark entry by name to avoid saving the class object.
        attrs[field_name] = field_value.name if field_name == "benchmark" else field_value
    # Defined outside configuration - if any
|
|
500
|
+
|
|
501
|
+
@timeit
def add_all_circuits_to_dataset(self, dataset: xr.Dataset):
    """Adds all generated circuits during execution to the dataset variable

    Args:
        dataset (xr.Dataset): The xarray dataset
    """
    # Fixed: previously an f-string with no placeholders (same message text).
    qcvv_logger.info("Adding all circuits to the dataset")
    dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
    dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
|
|
514
|
+
|
|
515
|
+
# def get_mapomatic_average_qv_scores(self) -> List[List[int]]:
|
|
516
|
+
# """Estimate the average mapomatic scores for N quantum volume circuit samples
|
|
517
|
+
# Returns:
|
|
518
|
+
# List[List[object]]: the mapomatic layout scores sorted from the smallest mean*(1+std/sqrt(N)) cost score
|
|
519
|
+
# """
|
|
520
|
+
# qv_circ_samples = self.mapomatic_qv_samples
|
|
521
|
+
#
|
|
522
|
+
# # Get calibration data once
|
|
523
|
+
# token = input("Trying to access IQM calibration data\nEnter your IQM token (without quote marks): ")
|
|
524
|
+
# calibration_data = get_calibration_fidelities(self.backend.architecture.name, token)
|
|
525
|
+
#
|
|
526
|
+
# qcvv_logger.info(
|
|
527
|
+
# "Evaluating matching layouts for QV",
|
|
528
|
+
# )
|
|
529
|
+
#
|
|
530
|
+
# all_scores: Dict[Tuple, List[float]] = {}
|
|
531
|
+
# for mapomatic_qv_sample in range(qv_circ_samples):
|
|
532
|
+
# qcvv_logger.info(
|
|
533
|
+
# f"Estimating layout costs for QV circuit sample {mapomatic_qv_sample+1}"
|
|
534
|
+
# )
|
|
535
|
+
# # Generate a representative quantum circuit sample
|
|
536
|
+
# qv_circ = get_circuit("quantum_volume", self.mapomatic_num_qubits)
|
|
537
|
+
# # Get all matching layouts
|
|
538
|
+
# layouts = matching_layouts(
|
|
539
|
+
# qv_circ,
|
|
540
|
+
# self.backend,
|
|
541
|
+
# self.backend.coupling_map,
|
|
542
|
+
# self.qiskit_optim_level,
|
|
543
|
+
# )
|
|
544
|
+
# # Evaluate all layouts
|
|
545
|
+
# scores = evaluate_costs(
|
|
546
|
+
# layouts,
|
|
547
|
+
# qv_circ,
|
|
548
|
+
# self.backend,
|
|
549
|
+
# calibration_data,
|
|
550
|
+
# self.qiskit_optim_level,
|
|
551
|
+
# )
|
|
552
|
+
# for score in scores:
|
|
553
|
+
# all_scores.setdefault(tuple(cast(list[int], score[0])), []).extend([cast(float, score[1])])
|
|
554
|
+
# qv_circ.clear()
|
|
555
|
+
#
|
|
556
|
+
# # Consider mean and std of the cost scores as mean*(1+std/sqrt(N))
|
|
557
|
+
# all_scores_mean_plus_std = {
|
|
558
|
+
# s_k: np.mean(s_v) * (1 + np.std(s_v) / np.sqrt(qv_circ_samples)) for s_k, s_v in all_scores.items()
|
|
559
|
+
# }
|
|
560
|
+
# # Turn into an array ([[tuple_layout0, score0], [tuple_layout1,score1], ... ])
|
|
561
|
+
# sorted_scores_mean_plus_std = list(sorted(all_scores_mean_plus_std.items(), key=lambda y: y[1]))
|
|
562
|
+
# # Return indices to list type for layouts ([[list_layout0, score0], [list_layout1,score1], ... ])
|
|
563
|
+
# layout_meanscore = [[list(s[0]), s[1]] for s in sorted_scores_mean_plus_std]
|
|
564
|
+
#
|
|
565
|
+
# mapomatic_qv_layouts = [x[0] for x in layout_meanscore[: self.mapomatic_num_layouts]]
|
|
566
|
+
# mapomatic_qv_costs = [x[1] for x in layout_meanscore[: self.mapomatic_num_layouts]]
|
|
567
|
+
#
|
|
568
|
+
# qcvv_logger.info(
|
|
569
|
+
# f"Will execute QV on layouts {mapomatic_qv_layouts}, of mapomatic average costs {mapomatic_qv_costs}"
|
|
570
|
+
# )
|
|
571
|
+
#
|
|
572
|
+
# return mapomatic_qv_layouts
|
|
573
|
+
|
|
574
|
+
@staticmethod
def generate_single_circuit(
    num_qubits: int,
    depth: Optional[int] = None,
    classical_permutation: bool = True,
) -> QuantumCircuit:
    """Generate a single QV quantum circuit, with measurements at the end.

    Args:
        num_qubits (int): number of qubits of the circuit
        depth (Optional[int]): The depth of the QV circuit. Defaults to None, which makes it equal to the number of
            qubits.
        classical_permutation (bool, optional): Whether permutations are classical, avoiding swapping layers.
            * Defaults to True.

    Returns:
        QuantumCircuit: the QV quantum circuit.
    """
    qv_model = QuantumVolume(num_qubits, depth=depth, classical_permutation=classical_permutation)
    # Decompose the QV library element into basic gates, then append final
    # measurements on all qubits.
    circuit = qv_model.decompose()
    circuit.measure_all()
    return circuit
|
|
595
|
+
|
|
596
|
+
@timeit
def generate_circuit_list(
    self,
    num_qubits: int,
    depth: Optional[int] = None,
    classical_permutations: bool = True,
) -> List[QuantumCircuit]:
    """Generate a list of QV quantum circuits, with measurements at the end.

    Args:
        num_qubits (int): the number of qubits of the circuits.
        depth (Optional[int]): The depth of the QV circuit. Defaults to None, which makes it equal to the number of qubits.
        classical_permutations (bool, optional): Whether permutations are classical, avoiding swapping layers.
            * Defaults to True.

    Returns:
        List[QuantumCircuit]: the list of QV quantum circuits.
    """
    circuits: List[QuantumCircuit] = []
    # One freshly sampled QV circuit per configured trial.
    for _ in range(self.num_circuits):
        circuits.append(
            self.generate_single_circuit(num_qubits, depth=depth, classical_permutation=classical_permutations)
        )
    return circuits
|
|
620
|
+
|
|
621
|
+
def get_rem_quasidistro(
    self,
    sorted_transpiled_qc_list: Dict[Tuple, List[QuantumCircuit]],
    sorted_qc_list_indices: Dict[Tuple, List[int]],
    execution_results: List[Dict[str, int]],
    mit_shots: int,
) -> List[List[QuasiCollection]]:
    """Computes readout-error-mitigated quasiprobabilities.

    Args:
        sorted_transpiled_qc_list (Dict[Tuple, List[QuantumCircuit]]): A dictionary of lists of quantum circuits, indexed by qubit layouts.
        sorted_qc_list_indices (Dict[Tuple, List[int]]): dictionary of indices (integers) corresponding to those in the original (untranspiled) list of circuits, with keys being final physical qubit measurements.
        execution_results (List[Dict[str, int]]): counts from execution of all quantum circuits.
        mit_shots (int): The number of measurement shots to estimate the readout calibration errors.
    Returns:
        A list of lists of quasiprobabilities.
    """
    quasidistributions: List[List[QuasiCollection]] = []
    # Process layout batches from largest to smallest — the same ordering used
    # elsewhere in this module when pairing circuits with their results.
    layouts_largest_first = sorted(
        sorted_transpiled_qc_list,
        key=lambda layout: len(sorted_transpiled_qc_list[layout]),
        reverse=True,
    )
    for layout in layouts_largest_first:
        # Counts belonging to the circuits of this layout batch.
        batch_counts = [execution_results[index] for index in sorted_qc_list_indices[layout]]
        batch_quasidistributions, _ = apply_readout_error_mitigation(
            self.backend, sorted_transpiled_qc_list[layout], batch_counts, mit_shots
        )
        quasidistributions.extend(batch_quasidistributions)
    return quasidistributions
|
|
651
|
+
|
|
652
|
+
def submit_single_qv_job(
    self,
    backend: IQMBackendBase,
    qubits: Sequence[int],
    sorted_transpiled_qc_list: Dict[Tuple[int, ...], List[QuantumCircuit]],
) -> Dict[str, Any]:
    """
    Submit jobs for execution in the specified IQMBackend.
    Args:
        backend (IQMBackendBase): the IQM backend to submit the job.
        qubits (Sequence[int]): the qubits to identify the submitted job.
        sorted_transpiled_qc_list (Dict[Tuple[int, ...], List[QuantumCircuit]]): the transpiled circuits to execute, keyed by final qubit layout.
    Returns:
        Dict with qubit layout, submitted job objects, type (vanilla/DD) and submission time.
    """
    # Only the "vanilla" (non-dynamical-decoupling) path is currently supported:
    # DD in the DIQE version retrieves counts automatically and thus prevents
    # submitting jobs dynamically.
    # TODO: re-enable a DD branch when the job manager for DD in Pulla is updated!  # pylint: disable=fixme
    qv_results_type = "vanilla"
    # Send to execute on backend
    execution_jobs, time_submit = submit_execute(
        sorted_transpiled_qc_list,
        backend,
        self.shots,
        self.calset_id,
        max_gates_per_batch=self.max_gates_per_batch,
    )
    return {
        "qubits": qubits,
        "jobs": execution_jobs,
        "qv_results_type": qv_results_type,
        "time_submit": time_submit,
    }
|
|
698
|
+
|
|
699
|
+
def execute(self, backend: IQMBackendBase) -> xr.Dataset:
    """Executes the benchmark.

    Generates, transpiles and submits QV circuits for every qubit layout in
    ``self.custom_qubits_array``, then retrieves counts and job metadata into
    an ``xarray.Dataset``.  If ``self.rem`` is True, readout-error-mitigated
    quasidistributions are additionally computed and stored in the dataset
    attributes.

    Args:
        backend (IQMBackendBase): the backend to execute the benchmark on.

    Returns:
        xr.Dataset: dataset holding counts, circuits, timing and job metadata.
    """

    self.execution_timestamp = strftime("%Y%m%d-%H%M%S")

    dataset = xr.Dataset()
    self.add_all_meta_to_dataset(dataset)

    # Submit jobs for all qubit layouts first
    all_qv_jobs: List[Dict[str, Any]] = []
    # Timing bookkeeping, keyed by the string representation of each qubit layout.
    time_circuit_generation = {}
    time_transpilation = {}
    time_batching = {}
    sorted_qc_list_indices = {}

    # Initialize the variable to contain the QV circuits of each layout
    self.untranspiled_circuits: Dict[str, Dict[Tuple, List[QuantumCircuit]]] = {}
    self.transpiled_circuits: Dict[str, Dict[Tuple, List[QuantumCircuit]]] = {}
    all_op_counts = {}

    for qubits in self.custom_qubits_array:  # NB: jobs will be submitted for qubit layouts in the specified order
        self.untranspiled_circuits[str(qubits)] = {}
        self.transpiled_circuits[str(qubits)] = {}
        num_qubits = len(qubits)
        # QV circuits are square: circuit depth equals the number of qubits.
        depth = num_qubits
        qcvv_logger.info(f"Executing QV on qubits {qubits}")

        qc_list, time_circuit_generation[str(qubits)] = self.generate_circuit_list(num_qubits, depth=depth)
        qcvv_logger.info(f"Successfully generated all {self.num_circuits} circuits to be executed")
        # Set the coupling map
        coupling_map = set_coupling_map(qubits, backend, self.physical_layout)
        # Perform transpilation to backend
        qcvv_logger.info(f'Will transpile according to "{self.physical_layout}" physical layout')
        transpiled_qc_list, time_transpilation[str(qubits)] = perform_backend_transpilation(
            qc_list,
            backend=backend,
            qubits=qubits,
            coupling_map=coupling_map,
            qiskit_optim_level=self.qiskit_optim_level,
            optimize_sqg=self.optimize_sqg,
            routing_method=self.routing_method,
        )
        # Batching
        sorted_transpiled_qc_list: Dict[Tuple[int, ...], List[QuantumCircuit]] = {}
        time_batching[str(qubits)] = 0
        if self.physical_layout == "fixed":
            # All circuits run on exactly the specified qubits: a single batch.
            sorted_transpiled_qc_list = {tuple(qubits): transpiled_qc_list}
            sorted_qc_list_indices[str(qubits)] = {tuple(qubits): list(range(len(qc_list)))}
        elif self.physical_layout == "batching":
            # Sort circuits according to their final measurement mappings
            (
                sorted_transpiled_qc_list,
                sorted_qc_list_indices[str(qubits)],
            ), time_batching[
                str(qubits)
            ] = sort_batches_by_final_layout(transpiled_qc_list)
        else:
            raise ValueError("physical_layout must either be \"fixed\" or \"batching\"")

        self.untranspiled_circuits[str(qubits)].update({tuple(qubits): qc_list})
        self.transpiled_circuits[str(qubits)].update(sorted_transpiled_qc_list)

        # Count operations
        all_op_counts[str(qubits)] = count_native_gates(backend, transpiled_qc_list)

        # Submit
        all_qv_jobs.append(self.submit_single_qv_job(backend, qubits, sorted_transpiled_qc_list))
        qcvv_logger.info(f"Job for layout {qubits} submitted successfully!")

    # Retrieve counts of jobs for all qubit layouts
    all_job_metadata = {}
    for job_idx, job_dict in enumerate(all_qv_jobs):
        qubits = job_dict["qubits"]
        # Retrieve counts
        execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], str(qubits))
        # Retrieve all job meta data
        all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])

        # Export all to dataset
        dataset.attrs.update(
            {
                job_idx: {
                    "qubits": qubits,
                    "qv_results_type": job_dict["qv_results_type"],
                    "time_circuit_generation": time_circuit_generation[str(qubits)],
                    "time_transpilation": time_transpilation[str(qubits)],
                    "time_batching": time_batching[str(qubits)],
                    "time_submit": job_dict["time_submit"],
                    "time_retrieve": time_retrieve,
                    "all_job_metadata": all_job_metadata,
                    "sorted_qc_list_indices": sorted_qc_list_indices[str(qubits)],
                    "operation_counts": all_op_counts[str(qubits)],
                }
            }
        )

        qcvv_logger.info(f"Adding counts of {qubits} run to the dataset")
        dataset, _ = add_counts_to_dataset(execution_results, str(qubits), dataset)

    self.add_all_circuits_to_dataset(dataset)

    if self.rem:
        # Post-processing: compute readout-error-mitigated quasidistributions
        # from the counts stored in the dataset, one entry per qubit layout.
        rem_quasidistros = {}
        for qubits in self.custom_qubits_array:
            exec_counts = xrvariable_to_counts(dataset, str(qubits), self.num_circuits)
            rem_quasidistros[f"REM_quasidist_{str(qubits)}"] = self.get_rem_quasidistro(
                self.transpiled_circuits[str(qubits)],
                sorted_qc_list_indices[str(qubits)],
                exec_counts,
                self.mit_shots,
            )
        dataset.attrs.update({"REM_quasidistributions": rem_quasidistros})

    qcvv_logger.info(f"QV experiment execution concluded !")
    return dataset
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
class QuantumVolumeConfiguration(BenchmarkConfigurationBase):
    """Quantum Volume configuration.

    Attributes:
        benchmark (Type[Benchmark]): QuantumVolumeBenchmark
        num_circuits (int): The number of circuits to use.
            * Should be at least 100 for a meaningful QV experiment.
        num_sigmas (int): The number of sample standard deviations to consider for the threshold criteria.
            * Default by consensus is 2
        choose_qubits_routine (Literal["custom"]): The routine to select qubit layouts.
            * Default is "custom".
        custom_qubits_array (Optional[Sequence[Sequence[int]]]): The physical qubit layouts to perform the benchmark on.
            * Default is [[0, 2]].
        qiskit_optim_level (int): The Qiskit transpilation optimization level.
            * Default is 3.
        optimize_sqg (bool): Whether Single Qubit Gate Optimization is performed upon transpilation.
            * Default is True.
        routing_method (Literal["basic", "lookahead", "stochastic", "sabre", "none"]): The Qiskit transpilation routing method to use.
            * Default is "sabre".
        physical_layout (Literal["fixed", "batching"]): Whether the coupling map is restricted to qubits in the input layout or not.
            - "fixed": Restricts the coupling map to only the specified qubits.
            - "batching": Considers the full coupling map of the backend and circuit execution is batched per final layout.
            * Default is "fixed"
        rem (bool): Whether Readout Error Mitigation is applied in post-processing.
            When set to True, both results (readout-unmitigated and -mitigated) are produced.
            - Default is True.
        mit_shots (int): The measurement shots to use for readout calibration.
            * Default is 1_000.

    NOTE(review): ``routing_method`` and ``physical_layout`` are documented above but not
    declared in this class — presumably inherited from ``BenchmarkConfigurationBase``; verify.
    NOTE(review): ``custom_qubits_array`` is documented as Optional with default [[0, 2]],
    but is declared below without a default (i.e. required); confirm which is intended.
    """

    benchmark: Type[Benchmark] = QuantumVolumeBenchmark
    num_circuits: int  # should be >= 100 for a meaningful QV experiment
    num_sigmas: int = 2
    choose_qubits_routine: Literal["custom"] = "custom"
    custom_qubits_array: Sequence[Sequence[int]]  # required: physical qubit layouts to benchmark
    qiskit_optim_level: int = 3
    optimize_sqg: bool = True
    rem: bool = True
    mit_shots: int = 1_000
|