iqm_benchmarks-1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of iqm-benchmarks might be problematic.
- iqm/benchmarks/__init__.py +31 -0
- iqm/benchmarks/benchmark.py +109 -0
- iqm/benchmarks/benchmark_definition.py +264 -0
- iqm/benchmarks/benchmark_experiment.py +163 -0
- iqm/benchmarks/compressive_gst/__init__.py +20 -0
- iqm/benchmarks/compressive_gst/compressive_gst.py +1029 -0
- iqm/benchmarks/entanglement/__init__.py +18 -0
- iqm/benchmarks/entanglement/ghz.py +802 -0
- iqm/benchmarks/logging_config.py +29 -0
- iqm/benchmarks/optimization/__init__.py +18 -0
- iqm/benchmarks/optimization/qscore.py +719 -0
- iqm/benchmarks/quantum_volume/__init__.py +21 -0
- iqm/benchmarks/quantum_volume/clops.py +726 -0
- iqm/benchmarks/quantum_volume/quantum_volume.py +854 -0
- iqm/benchmarks/randomized_benchmarking/__init__.py +18 -0
- iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +386 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +555 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +810 -0
- iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +86 -0
- iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +892 -0
- iqm/benchmarks/readout_mitigation.py +290 -0
- iqm/benchmarks/utils.py +521 -0
- iqm_benchmarks-1.3.dist-info/LICENSE +205 -0
- iqm_benchmarks-1.3.dist-info/METADATA +190 -0
- iqm_benchmarks-1.3.dist-info/RECORD +42 -0
- iqm_benchmarks-1.3.dist-info/WHEEL +5 -0
- iqm_benchmarks-1.3.dist-info/top_level.txt +2 -0
- mGST/LICENSE +21 -0
- mGST/README.md +54 -0
- mGST/additional_fns.py +962 -0
- mGST/algorithm.py +733 -0
- mGST/compatibility.py +238 -0
- mGST/low_level_jit.py +694 -0
- mGST/optimization.py +349 -0
- mGST/qiskit_interface.py +282 -0
- mGST/reporting/figure_gen.py +334 -0
- mGST/reporting/reporting.py +710 -0
@@ -0,0 +1,726 @@
# Copyright 2024 IQM Benchmarks developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
CLOPS benchmark
"""

from datetime import datetime
from math import floor, pi
from time import perf_counter, strftime
from typing import Any, Dict, List, Sequence, Tuple, Type

import matplotlib as mpl
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import ParameterVector
import xarray as xr

from iqm.benchmarks import Benchmark
from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
from iqm.benchmarks.benchmark_definition import AnalysisResult, RunResult
from iqm.benchmarks.logging_config import qcvv_logger
from iqm.benchmarks.utils import (
    count_2q_layers,
    count_native_gates,
    perform_backend_transpilation,
    retrieve_all_counts,
    retrieve_all_job_metadata,
    set_coupling_map,
    sort_batches_by_final_layout,
    submit_execute,
    timeit,
)
from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
from iqm.qiskit_iqm.iqm_transpilation import optimize_single_qubit_gates

def plot_times(clops_data: xr.Dataset, observations: Dict[int, Dict[str, Dict[str, float]]]) -> Tuple[str, Figure]:
    """Generate a figure representing the different elapsed times in the CLOPS experiment.

    Args:
        clops_data (xr.Dataset): The dataset including elapsed time data from the CLOPS experiment
        observations (Dict[int, Dict[str, Dict[str, float]]]): The observations from the analysis of the CLOPS experiment.
    Returns:
        str: the name of the figure.
        Figure: the figure.
    """
    # Define the keys for different categories of times
    job_keys = ["submit_total", "compile_total", "execution_total"]
    user_keys = ["user_submit_total", "user_retrieve_total", "assign_parameters_total"]
    total_keys = ["job_total"]

    # Define variables for dataset values
    qubits = clops_data.attrs["qubits"]
    num_qubits = clops_data.attrs["num_qubits"]
    backend_name = clops_data.attrs["backend_name"]
    execution_timestamp = clops_data.attrs["execution_timestamp"]
    clops_time = float(clops_data.attrs["clops_time"])

    all_data = {}
    all_data.update(clops_data.attrs)
    all_data.update(observations[1])

    # Define colors
    cmap = mpl.pyplot.get_cmap("winter")
    colors = [cmap(i) for i in np.linspace(0, 1, len(job_keys) + len(user_keys) + len(total_keys) + 1)]

    # Plotting parameters
    fontsize = 6
    sep = 1
    barsize = 1
    alpha = 0.4

    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()

    fig_name = f"{num_qubits}_qubits_{tuple(qubits)}"

    # Plot total CLOPS time
    x_t = ax1.bar(4 * sep, clops_time, barsize, zorder=0, label="clops time", color=(colors[-1], alpha), edgecolor="k")
    ax1.bar_label(x_t, fmt=f"clops time: {clops_time:.2f}", fontsize=fontsize)

    # Plot user keys
    for i, (key, cumulative_value) in enumerate(zip(user_keys, np.cumsum([all_data[k] for k in user_keys]))):
        x = ax1.bar(
            3 * sep,
            cumulative_value,
            barsize,
            zorder=1 - i / 10,
            label=key,
            color=(colors[len(job_keys) + len(total_keys) + i], alpha),
            edgecolor="k",
        )
        ax1.bar_label(x, fmt=f"{key.replace('_total', ' ').replace('_', ' ')}: {all_data[key]:.2f}", fontsize=fontsize)

    # Plot total keys
    for i, (key, cumulative_value) in enumerate(zip(total_keys, np.cumsum([all_data[k] for k in total_keys]))):
        x = ax1.bar(
            2 * sep,
            cumulative_value,
            barsize,
            zorder=1 - i / 10,
            label=key,
            color=(colors[len(job_keys) + i], alpha),
            edgecolor="k",
        )
        ax1.bar_label(x, fmt=f"{key.replace('_total', ' ')}: {all_data[key]:.2f}", fontsize=fontsize)

    # Plot job keys
    for i, (key, cumulative_value) in enumerate(zip(job_keys, np.cumsum([all_data[k] for k in job_keys]))):
        x = ax1.bar(
            sep, cumulative_value, barsize, zorder=2 - i / 10, label=key, color=(colors[i], alpha), edgecolor="k"
        )
        ax1.bar_label(x, label_type="edge", fmt=f"{key.replace('_total', ' ')}: {all_data[key]:.2f}", fontsize=fontsize)

    # Remove top and bottom spines
    ax1.spines["top"].set_visible(False)
    ax1.spines["bottom"].set_visible(False)
    ax2.spines["top"].set_visible(False)
    ax2.spines["bottom"].set_visible(False)

    # Set axis labels and limits
    ax1.set_ylabel("Total CLOPS time (seconds)")
    ax2.set_ylabel("Total CLOPS time (%)")
    ax1.set_ylim(-0.2, clops_time + 1)
    ax2.set_ylim(-0.2, 100)

    # Set x-ticks and labels
    time_types = ["Remote (components)", "Remote (total)", "Wall-time (components)", "Wall-time (total)"]
    ax1.set_xticks([i * sep + 1 for i in range(4)], time_types, fontsize=fontsize)

    # Set plot title
    if all_data["clops_h"]["value"] == 0:
        plt.title(
            f"Total execution times for CLOPS experiment\n"
            f"{backend_name}, {execution_timestamp}\n"
            f"CLOPS_v {all_data['clops_v']['value']} on qubits {qubits}\n"
        )
    else:
        plt.title(
            f"Total execution times for CLOPS experiment\n"
            f"{backend_name}, {execution_timestamp}\n"
            f"CLOPS_v {all_data['clops_v']['value']} / CLOPS_h {all_data['clops_h']['value']} on qubits {qubits}\n"
        )

    plt.gcf().set_dpi(250)
    plt.close()

    return fig_name, fig

def retrieve_clops_elapsed_times(job_meta: Dict[str, Dict[str, Any]]) -> Dict[str, float]:
    """Retrieve the elapsed times from the CLOPS job metadata

    Args:
        job_meta (Dict[str, Dict[str, Any]]): A dictionary of CLOPS jobs metadata
    Returns:
        Dict[str, float]: A dictionary of elapsed times of all CLOPS jobs
    """
    all_job_elapsed: Dict = {}
    totals_keys: Dict = {}
    for u_index, update in enumerate(job_meta):
        all_job_elapsed[update] = {}
        for b_index, batch in enumerate(job_meta[update].keys()):
            # ["timestamps"] might be empty if backend is a simulator
            if job_meta[update][batch]["timestamps"] is not None:
                x = job_meta[update][batch]["timestamps"]
                job_time_format = "%Y-%m-%dT%H:%M:%S.%f%z"  # Is it possible to extract this automatically?
                compile_f = datetime.strptime(x["compile_end"], job_time_format)
                compile_i = datetime.strptime(x["compile_start"], job_time_format)
                submit_f = datetime.strptime(x["submit_end"], job_time_format)
                submit_i = datetime.strptime(x["submit_start"], job_time_format)
                execution_f = datetime.strptime(x["execution_end"], job_time_format)
                execution_i = datetime.strptime(x["execution_start"], job_time_format)
                job_f = datetime.strptime(x["job_end"], job_time_format)
                job_i = datetime.strptime(x["job_start"], job_time_format)

                all_job_elapsed[update][batch] = {
                    "job_total": job_f - job_i,
                    "compile_total": compile_f - compile_i,
                    "submit_total": submit_f - submit_i,
                    "execution_total": execution_f - execution_i,
                }

                # Save the keys, will be needed later
                totals_keys = all_job_elapsed[update][batch].keys()

                # Update metadata file - need to turn values ("timedelta" type) into str
                job_meta["update_" + str(u_index + 1)]["batch_job_" + str(b_index + 1)].update(
                    {k: str(all_job_elapsed[update][batch][k]) for k in totals_keys}
                )

    # Add up totals
    overall_elapsed: Dict = {}
    # If backend is a simulator
    if not totals_keys:
        return overall_elapsed

    overall_elapsed = {k: 0 for k in totals_keys}
    overall_elapsed_per_update: Dict = {}
    for update in job_meta:
        overall_elapsed_per_update[update] = {k: 0 for k in totals_keys}
        for batch in job_meta[update].keys():
            for k in totals_keys:
                overall_elapsed_per_update[update][k] += all_job_elapsed[update][batch][k].total_seconds()
        for k in totals_keys:
            overall_elapsed[k] += overall_elapsed_per_update[update][k]

    return overall_elapsed

def clops_analysis(run: RunResult) -> AnalysisResult:
    """Analysis function for a CLOPS (v or h) experiment

    Args:
        run (RunResult): A CLOPS experiment run for which analysis result is created
    Returns:
        AnalysisResult corresponding to CLOPS
    """
    plots: Dict[str, Any] = {}
    observations = {}
    dataset = run.dataset

    # Retrieve dataset values
    # backend_name = dataset.attrs["backend_configuration_name"]
    num_circuits = dataset.attrs["num_circuits"]
    num_updates = dataset.attrs["num_updates"]
    num_shots = dataset.attrs["num_shots"]
    depth = dataset.attrs["depth"]

    clops_h_bool = dataset.attrs["clops_h_bool"]

    all_job_meta = dataset.attrs["job_meta_per_update"]

    clops_time = dataset.attrs["clops_time"]
    all_times_parameter_assign = dataset.attrs["all_times_parameter_assign"]
    all_times_submit = dataset.attrs["all_times_submit"]
    all_times_retrieve = dataset.attrs["all_times_retrieve"]

    transpiled_qc_list = []
    for _, value in dataset.attrs["transpiled_circuits"].items():
        transpiled_qc_list.extend(value)

    # CLOPS_V
    clops_v: float = num_circuits * num_updates * num_shots * depth / clops_time

    # CLOPS_H
    clops_h: float = 0.0
    counts_depths = {}
    time_count_layers: float = 0.0
    if clops_h_bool:
        # CLOPS_h: count the number of layers made of parallel 2Q gates in the transpiled circuits
        clops_h_depths: List[int]
        qcvv_logger.info("Counting the number of parallel 2Q layer depths in each circuit")
        clops_h_depths, time_count_layers = count_2q_layers(  # pylint: disable=unbalanced-tuple-unpacking
            transpiled_qc_list
        )
        counts_depths = {f"depth_{u}": clops_h_depths.count(u) for u in sorted(set(clops_h_depths))}

        clops_h = num_circuits * num_updates * num_shots * np.mean(clops_h_depths) / clops_time

    processed_results = {
        "clops_v": {"value": int(clops_v), "uncertainty": np.NaN},
        "clops_h": {"value": int(clops_h), "uncertainty": np.NaN},
    }

    dataset.attrs.update(
        {
            "assign_parameters_total": sum(all_times_parameter_assign.values()),
            "user_submit_total": sum(all_times_submit.values()),
            "user_retrieve_total": sum(all_times_retrieve.values()),
            "parallel_2q_layers": counts_depths,
            "time_count_layers": time_count_layers,
        }
    )

    # UPDATE OBSERVATIONS
    observations.update({1: processed_results})

    # PLOT
    # Get all execution elapsed times
    overall_elapsed = retrieve_clops_elapsed_times(all_job_meta)  # will be {} if backend is a simulator
    if overall_elapsed:
        qcvv_logger.info("Total elapsed times from job execution metadata:")
        for k in overall_elapsed.keys():
            dataset.attrs[k] = overall_elapsed[k]
            if overall_elapsed[k] > 60.0:
                qcvv_logger.info(f'\t"{k}": {overall_elapsed[k] / 60.0:.2f} min')
            else:
                qcvv_logger.info(f'\t"{k}": {overall_elapsed[k]:.2f} sec')

        fig_name, fig = plot_times(dataset, observations)
        plots[fig_name] = fig
    else:
        plots["no_backend_elapsed"] = "There is no elapsed-time data associated to jobs (e.g., execution on simulator)"

    # Sort the final dataset
    dataset.attrs = dict(sorted(dataset.attrs.items()))

    return AnalysisResult(dataset=dataset, plots=plots, observations=observations)

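# A quick numerical illustration of the two figures of merit computed in clops_analysis()
# above (the numbers below are hypothetical and only meant to show the arithmetic):
# with the default CLOPS parameters M = num_circuits = 100, K = num_updates = 10,
# S = num_shots = 100 and QV depth D (equal to the number of qubits),
#
#     clops_v = M * K * S * D / clops_time
#     clops_h = M * K * S * mean(parallel 2Q layers per circuit) / clops_time
#
# so, for example, a 5-qubit run (D = 5) whose timed section lasts 250 s would yield
# clops_v = 100 * 10 * 100 * 5 / 250 = 2000.
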
class CLOPSBenchmark(Benchmark):
    """
    CLOPS reflects the speed of execution of parametrized QV circuits.
    """

    analysis_function = staticmethod(clops_analysis)

    name: str = "clops"

    def __init__(self, backend_arg: IQMBackendBase | str, configuration: "CLOPSConfiguration"):
        """Construct the CLOPSBenchmark class.

        Args:
            backend_arg (IQMBackendBase | str): the backend to execute the benchmark on
            configuration (CLOPSConfiguration): the configuration of the benchmark
        """
        super().__init__(backend_arg, configuration)

        # EXPERIMENT
        self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name
        self.qubits = configuration.qubits
        self.num_qubits = len(self.qubits)
        self.depth = self.num_qubits

        self.u_per_layer = floor(self.num_qubits / 2)
        self.num_parameters = self.depth * self.u_per_layer * 15
        self.param_vector = ParameterVector("p", length=self.num_parameters)

        self.num_circuits = configuration.num_circuits
        self.num_updates = configuration.num_updates
        self.num_shots = configuration.num_shots

        self.qiskit_optim_level = configuration.qiskit_optim_level
        self.optimize_sqg = configuration.optimize_sqg

        # POST-EXPERIMENT AND VARIABLES TO STORE
        self.clops_h_bool = configuration.clops_h_bool

        self.parameters_per_update: Dict[str, List[float]] = {}
        self.counts_per_update: Dict[str, Dict[str, int]] = {}
        self.job_meta_per_update: Dict[str, Dict[str, Dict[str, Any]]] = {}

        self.time_circuit_generate: float = 0.0
        self.time_transpile: float = 0.0
        self.time_sort_batches: float = 0.0

        self.session_timestamp = strftime("%Y-%m-%d_%H:%M:%S")
        self.execution_timestamp: str = ""

    def add_all_meta_to_dataset(self, dataset: xr.Dataset):
        """Adds all configuration metadata to the dataset variable

        Args:
            dataset (xr.Dataset): The xarray dataset
        """

        dataset.attrs["session_timestamp"] = self.session_timestamp
        dataset.attrs["execution_timestamp"] = self.execution_timestamp
        dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
        dataset.attrs["backend_name"] = self.backend.name

        for key, value in self.configuration:
            if key == "benchmark":  # Avoid saving the class object
                dataset.attrs[key] = value.name
            else:
                dataset.attrs[key] = value

        # Defined outside configuration
        dataset.attrs["num_qubits"] = self.num_qubits
        dataset.attrs["depth"] = self.depth
        dataset.attrs["u_per_layer"] = self.u_per_layer
        dataset.attrs["num_parameters"] = self.num_parameters

    def add_all_circuits_to_dataset(self, dataset: xr.Dataset):
        """Adds all generated circuits during execution to the dataset variable

        Args:
            dataset (xr.Dataset): The xarray dataset
        """
        qcvv_logger.info(f"Adding all circuits to the dataset")
        dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
        dataset.attrs["transpiled_circuits"] = self.transpiled_circuits

    def append_parameterized_unitary(
        self,
        qc: QuantumCircuit,
        q0: int,
        q1: int,
        layer: int,
        pair: int,
    ) -> None:
        """Append a decomposed, parametrized SU(4) gate using CX gates to the given quantum circuit.

        Args:
            qc (QuantumCircuit): the quantum circuit to append the SU(4) gate to
            q0 (int): the first qubit involved in the gate
            q1 (int): the second qubit involved in the gate
            layer (int): the QV layer the gate belongs to
            pair (int): the pair index corresponding to the gate
        """
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 0], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 1], q1)
        qc.sx(q0)
        qc.sx(q1)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 2], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 3], q1)
        qc.sx(q0)
        qc.sx(q1)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 4], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 5], q1)
        qc.cx(q1, q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 6], q0)
        qc.sx(q1)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 7], q1)
        qc.cx(q1, q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 8], q0)
        qc.sx(q1)
        qc.cx(q1, q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 9], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 12], q1)
        qc.sx(q0)
        qc.sx(q1)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 10], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 13], q1)
        qc.sx(q0)
        qc.sx(q1)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 11], q0)
        qc.rz(self.param_vector[15 * (pair + self.u_per_layer * layer) + 14], q1)

    def generate_single_circuit(self) -> QuantumCircuit:
        """Generate a single parametrized QV quantum circuit, with measurements at the end.

        Returns:
            QuantumCircuit: the QV quantum circuit.
        """
        self.u_per_layer = floor(self.num_qubits / 2)
        qc = QuantumCircuit(self.num_qubits)

        qubits = qc.qubits

        for layer in range(self.depth):
            perm = np.random.permutation(self.num_qubits)
            for pair in range(self.u_per_layer):
                q0 = qubits[perm[pair * 2]]
                q1 = qubits[perm[pair * 2 + 1]]

                self.append_parameterized_unitary(qc, q0, q1, layer, pair)

        qc.measure_all()

        return qc

    @timeit
    def generate_circuit_list(
        self,
    ) -> List[QuantumCircuit]:
        """Generate a list of parametrized QV quantum circuits, with measurements at the end.

        Returns:
            List[QuantumCircuit]: the list of parametrized QV quantum circuits.
        """
        qc_list = [self.generate_single_circuit() for _ in range(self.num_circuits)]
        return qc_list

    def generate_random_parameters(self) -> np.ndarray:
        """Generate an array of as many random parameters as needed by the QV template circuits.

        Returns:
            np.ndarray[np.float64]: the array of random parameters
        """
        return np.random.uniform(low=-pi, high=pi, size=self.num_parameters)

    @timeit
    def assign_random_parameters_to_all(
        self,
        dict_parametrized_circs: Dict[Tuple, List[QuantumCircuit]],
        optimize_sqg: bool,
    ) -> Tuple[List[List[float]], Dict[Tuple, List[QuantumCircuit]]]:
        """Assign fresh random parameter values to all the given parametrized circuits.

        Args:
            dict_parametrized_circs (Dict[Tuple, List[QuantumCircuit]]): Dictionary with tuples of int (qubits) as keys and lists of parametrized quantum circuits as values
            optimize_sqg (bool): Whether single qubit gate optimization is applied
        Returns:
            A tuple containing:
                - a list of lists of float parameter values corresponding to the parameter updates
                - a dictionary with tuples of int (qubits) as keys and lists of quantum circuits as values
        """
        # Store parametrized circuits in a separate dictionary
        sorted_dict_parametrized: Dict[Tuple, List[QuantumCircuit]] = {k: [] for k in dict_parametrized_circs.keys()}
        param_values: List[List[float]] = []
        for k in dict_parametrized_circs.keys():
            for qc in dict_parametrized_circs[k]:
                # Update parameters
                parameters = self.generate_random_parameters()

                # NDonis can't use optimize_sqg as is, yet -> complains about MOVE gate not being IQM native!
                if optimize_sqg and self.backend.name != "IQMNdonisBackend":
                    sorted_dict_parametrized[k].append(
                        optimize_single_qubit_gates(  # Optimize SQG seems worth it AFTER assignment
                            qc.assign_parameters(
                                dict(zip(qc.parameters, parameters)),
                                inplace=False,  # Leave the template intact for next updates
                            )
                        )
                    )
                else:
                    sorted_dict_parametrized[k].append(
                        qc.assign_parameters(
                            dict(zip(qc.parameters, parameters)),
                            inplace=False,  # Leave the template intact for next updates
                        )
                    )

                param_values.append(list(parameters))

        return param_values, sorted_dict_parametrized

    def clops_cycle(
        self,
        backend: IQMBackendBase,
        sorted_transpiled_qc_list: Dict[Tuple, List[QuantumCircuit]],
        update: int,
    ) -> Tuple[float, float, float]:
        """Executes a single CLOPS cycle (parameter assignment and execution) for the given update
        Args:
            backend (IQMBackendBase): the backend to execute the jobs with
            sorted_transpiled_qc_list (Dict[Tuple, List[QuantumCircuit]]): A dictionary of lists of transpiled quantum circuits
            update (int): The current cycle update
        Returns:
            Tuple[float, float, float]: The elapsed times for parameter assignment, submission and retrieval of jobs
        """
        # Assign parameters to all quantum circuits
        qcvv_logger.info(
            f"Update {(update + 1)}/{self.num_updates}\nAssigning random parameters to all {self.num_circuits} circuits"
        )
        (all_param_updates, sorted_transpiled_qc_list_parametrized), time_parameter_assign = (
            self.assign_random_parameters_to_all(sorted_transpiled_qc_list, self.optimize_sqg)
        )

        qcvv_logger.info(f"Executing the corresponding circuit batch")
        # Submit all circuits to execute
        all_jobs, time_submit = submit_execute(
            sorted_transpiled_qc_list_parametrized,
            backend,
            self.num_shots,
            self.calset_id,
            max_gates_per_batch=self.max_gates_per_batch,
        )

        qcvv_logger.info(f"Retrieving counts")
        # Retrieve counts - the precise outputs do not matter
        all_counts, time_retrieve = retrieve_all_counts(all_jobs)
        # Save counts - ensures counts were received and can be inspected
        self.parameters_per_update["parameters_update_" + str(update + 1)] = all_param_updates
        self.counts_per_update["counts_update_" + str(update + 1)] = all_counts
        # Retrieve and save all job metadata
        all_job_metadata = retrieve_all_job_metadata(all_jobs)
        self.job_meta_per_update["update_" + str(update + 1)] = all_job_metadata

        return time_parameter_assign, time_submit, time_retrieve

    def generate_transpiled_clops_templates(self) -> Dict[Tuple, List[QuantumCircuit]]:
        """Generates CLOPS circuit templates transpiled to the backend's physical layout

        Returns:
            Dict[Tuple, List[QuantumCircuit]]: a dictionary of lists of transpiled quantum circuits, keyed by the tuple of qubits they act on
        """
        # Generate the list of QV circuits
        self.untranspiled_circuits = {}
        self.transpiled_circuits = {}

        qc_list, self.time_circuit_generate = self.generate_circuit_list()

        coupling_map = set_coupling_map(self.qubits, self.backend, self.physical_layout)
        # Perform transpilation to backend
        qcvv_logger.info(
            f'Will transpile all {self.num_circuits} circuits according to "{self.physical_layout}" physical layout'
        )

        transpiled_qc_list, self.time_transpile = perform_backend_transpilation(
            qc_list,
            self.backend,
            self.qubits,
            coupling_map=coupling_map,
            qiskit_optim_level=self.qiskit_optim_level,
            optimize_sqg=True,
            routing_method=self.routing_method,
        )

        # Batching
        sorted_transpiled_qc_list = {}
        if self.physical_layout == "fixed":
            sorted_transpiled_qc_list = {tuple(self.qubits): transpiled_qc_list}
        elif self.physical_layout == "batching":
            # Sort circuits according to their final measurement mappings
            (sorted_transpiled_qc_list, _), self.time_sort_batches = sort_batches_by_final_layout(transpiled_qc_list)

        self.untranspiled_circuits.update({tuple(self.qubits): qc_list})
        self.transpiled_circuits.update(sorted_transpiled_qc_list)

        return sorted_transpiled_qc_list

    def execute(self, backend: IQMBackendBase) -> xr.Dataset:
        """Executes the benchmark"""

        self.execution_timestamp = strftime("%Y-%m-%d_%H:%M:%S")

        dataset = xr.Dataset()
        self.add_all_meta_to_dataset(dataset)

        qcvv_logger.info(
            "NB: CLOPS should be estimated with same qubit layout and optional inputs used to establish QV!"
        )
        if self.num_circuits != 100 or self.num_updates != 10 or self.num_shots != 100:
            qcvv_logger.info(
                f"NB: CLOPS parameters, by definition, are [num_circuits=100, num_updates=10, num_shots=100]"
                f" You chose"
                f" [num_circuits={self.num_circuits}, num_updates={self.num_updates}, num_shots={self.num_shots}]."
            )

        qcvv_logger.info(
            f"Now generating {self.num_circuits} parametrized circuit templates on qubits {self.qubits}",
        )

        sorted_transpiled_qc_list = self.generate_transpiled_clops_templates()

        # *********************************************
        # Start CLOPS timer
        # *********************************************
        start_clops_timer = perf_counter()
        qcvv_logger.info(f"CLOPS time started")
        all_times_parameter_assign = {}
        all_times_submit = {}
        all_times_retrieve = {}
        for n in range(self.num_updates):
            time_parameter_assign, time_submit, time_retrieve = self.clops_cycle(backend, sorted_transpiled_qc_list, n)
            all_times_parameter_assign["update_" + str(n + 1)] = time_parameter_assign
            all_times_submit["update_" + str(n + 1)] = time_submit
            all_times_retrieve["update_" + str(n + 1)] = time_retrieve
        # *********************************************
        # End CLOPS timer
        # *********************************************
        end_clops_timer = perf_counter()

        # COUNT OPERATIONS
        all_op_counts = count_native_gates(backend, [x for y in list(sorted_transpiled_qc_list.values()) for x in y])

        dataset.attrs.update(
            {
                "clops_time": end_clops_timer - start_clops_timer,
                "all_times_parameter_assign": all_times_parameter_assign,
                "all_times_submit": all_times_submit,
                "all_times_retrieve": all_times_retrieve,
                "time_circuit_generate": self.time_circuit_generate,
                "time_transpile": self.time_transpile,
                "time_sort_batches": self.time_sort_batches,
                "parameters_per_update": self.parameters_per_update,
                "job_meta_per_update": self.job_meta_per_update,
                "counts_per_update": self.counts_per_update,
                "operation_counts": all_op_counts,
            }
        )

        self.add_all_circuits_to_dataset(dataset)

        return dataset

class CLOPSConfiguration(BenchmarkConfigurationBase):
    """CLOPS configuration.

    Attributes:
        benchmark (Type[Benchmark]): CLOPS Benchmark.
        qubits (Sequence[int]): The Sequence (List or Tuple) of physical qubit labels on which to run the benchmark.
            * The physical qubit layout should correspond to the one used to establish QV.
        num_circuits (int): The number of parametrized circuit layouts.
            * By definition of arXiv:2110.14108 [quant-ph] set to 100.
        num_updates (int): The number of parameter assignment updates per circuit layout to perform.
            * By definition of arXiv:2110.14108 [quant-ph] set to 10.
        num_shots (int): The number of measurement shots per circuit to perform.
            * By definition of arXiv:2110.14108 [quant-ph] set to 100.
        clops_h_bool (bool): Whether a CLOPS value with the layer definition of CLOPS_H is estimated.
            * Default is False.
            * This will not estimate a rigorous CLOPS_H value (which is only loosely defined in www.ibm.com/quantum/blog/quantum-metric-layer-fidelity).
        qiskit_optim_level (int): The Qiskit transpilation optimization level.
            * The optimization level should correspond to the one used to establish QV.
            * Default is 3.
        optimize_sqg (bool): Whether Single Qubit Gate Optimization is performed upon transpilation.
            * The optimize_sqg value should correspond to the one used to establish QV.
            * Default is True.
        routing_method (Literal["basic", "lookahead", "stochastic", "sabre", "none"]): The Qiskit transpilation routing method to use.
            * The routing_method value should correspond to the one used to establish QV.
            * Default is "sabre".
        physical_layout (Literal["fixed", "batching"]): Whether the coupling map is restricted to the qubits in the input layout or not.
            - "fixed": Restricts the coupling map to only the specified qubits.
            - "batching": Considers the full coupling map of the backend and circuit execution is batched per final layout.
            * Default is "fixed".
    """

    benchmark: Type[Benchmark] = CLOPSBenchmark
    qubits: Sequence[int]
    num_circuits: int = 100
    num_updates: int = 10
    num_shots: int = 100
    clops_h_bool: bool = False
    qiskit_optim_level: int = 3
    optimize_sqg: bool = True
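
For orientation, the following is a minimal usage sketch assembled only from the constructor and configuration fields shown in this diff. The qubit layout and the backend argument are placeholders, and the run/analysis plumbing that normally drives a benchmark (see benchmark_definition.py and benchmark_experiment.py in the file list above) is not shown here, so treat this as an illustration rather than documented usage.

from iqm.benchmarks.quantum_volume.clops import CLOPSBenchmark, CLOPSConfiguration

# Placeholder values: the qubit layout and transpilation options should match those used to establish QV.
config = CLOPSConfiguration(
    qubits=[0, 1, 2, 3, 4],
    num_circuits=100,   # arXiv:2110.14108 defaults
    num_updates=10,
    num_shots=100,
    qiskit_optim_level=3,
    optimize_sqg=True,
)

# backend_arg may be an IQMBackendBase instance or a backend name string (placeholder here).
benchmark = CLOPSBenchmark("<backend-name>", config)

# CLOPSBenchmark.execute() builds and runs the parametrized QV circuits and returns an
# xarray Dataset; clops_analysis() then turns a RunResult into the CLOPS_v / CLOPS_h observations.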
|