iqm-benchmarks 2.41-py3-none-any.whl → 2.43-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iqm/benchmarks/coherence/__init__.py +18 -0
- iqm/benchmarks/coherence/coherence.py +548 -0
- iqm/benchmarks/optimization/qscore.py +64 -71
- iqm/benchmarks/utils.py +22 -26
- {iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/METADATA +1 -1
- {iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/RECORD +9 -7
- {iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/WHEEL +0 -0
- {iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/licenses/LICENSE +0 -0
- {iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/top_level.txt +0 -0
iqm/benchmarks/coherence/__init__.py
ADDED
@@ -0,0 +1,18 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Estimation of the coherence properties of the qubits and computational resonator.
+"""
+
+from . import coherence
iqm/benchmarks/coherence/coherence.py
ADDED
@@ -0,0 +1,548 @@
+"""
+Coherence benchmark
+"""
+
+import logging
+from time import strftime
+from typing import Dict, List, Tuple, Type
+
+from matplotlib.figure import Figure
+import matplotlib.pyplot as plt
+import networkx as nx
+import numpy as np
+from qiskit import QuantumCircuit, transpile
+from scipy.optimize import curve_fit
+import xarray as xr
+
+from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
+from iqm.benchmarks.benchmark_definition import (
+    Benchmark,
+    BenchmarkAnalysisResult,
+    BenchmarkObservation,
+    BenchmarkObservationIdentifier,
+    BenchmarkRunResult,
+    add_counts_to_dataset,
+)
+from iqm.benchmarks.circuit_containers import BenchmarkCircuit, CircuitGroup, Circuits
+from iqm.benchmarks.logging_config import qcvv_logger
+from iqm.benchmarks.utils import (  # execute_with_dd,
+    perform_backend_transpilation,
+    retrieve_all_counts,
+    submit_execute,
+    xrvariable_to_counts,
+)
+from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
+
+
+def exp_decay(t, A, T, C):
+    """
+    Calculate the exponential decay at time t.
+
+    Parameters:
+        t (float or array-like): The time variable(s) at which to evaluate the decay.
+        A (float): The initial amplitude of the decay.
+        T (float): The time constant, which dictates the rate of decay.
+        C (float): The constant offset added to the decay.
+
+    Returns:
+        float or array-like: The value(s) of the exponential decay at time t.
+    """
+    return A * np.exp(-t / T) + C
+
+
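The fitted model is P(t) = A * exp(-t / T) + C, with T playing the role of T1 or T2_echo. A minimal, self-contained sketch of the same fit on synthetic data (all numbers are illustrative, not taken from the package):

    import numpy as np
    from scipy.optimize import curve_fit

    def exp_decay(t, A, T, C):
        return A * np.exp(-t / T) + C

    rng = np.random.default_rng(0)
    delays = np.linspace(0.0, 200e-6, 21)                      # delay times in seconds
    p1 = exp_decay(delays, 0.95, 60e-6, 0.03) + rng.normal(0, 0.01, delays.size)

    # Same initial guesses and bounds as fit_coherence_model further below
    p0 = [max(p1[0] - p1[-1], 0), delays[len(delays) // 2], p1[-1]]
    popt, pcov = curve_fit(exp_decay, delays, p1, p0=p0, bounds=([0, 1e-6, 0], [1.2, 10.0, 1]))
    print(f"T = {popt[1] * 1e6:.1f} ± {np.sqrt(np.diag(pcov))[1] * 1e6:.1f} µs")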
+def plot_coherence(
+    amplitude: Dict[str, float],
+    backend_name: str,
+    delays: List[float],
+    offset: Dict[str, float],
+    qubit_set: List[int],
+    qubit_probs: dict[str, List[float]],
+    timestamp: str,
+    T_fit: Dict[str, float],
+    T_fit_err: Dict[str, float],
+    qubit_to_plot: List[int] | None = None,
+    coherence_exp: str = "t1",
+) -> Tuple[str, Figure]:
+    """
+    Plot coherence decay (T1 or T2_echo) for each qubit as subplots.
+
+    Args:
+        amplitude: Fitted amplitudes (A) per qubit.
+        backend_name: Name of the backend used for the experiment.
+        delays: List of delay times used in the coherence experiments.
+        offset: Fitted offsets (C) for each qubit.
+        qubit_set: List of qubit indices involved in the experiment.
+        qubit_probs: Measured probabilities P(1) for each qubit at different delays.
+        timestamp: Timestamp for labeling the plot.
+        T_fit: Fitted coherence time (T) for each qubit.
+        T_fit_err: Fitted coherence time error for each qubit.
+        qubit_to_plot: Specific qubits to plot. If None, all qubits in `qubit_set` are plotted.
+        coherence_exp: Type of coherence experiment ('t1' or 't2_echo') for labeling and plotting logic.
+
+
+    Returns:
+        Tuple[str, Figure]: Filename of the saved plot and the matplotlib figure object.
+    """
+    if qubit_to_plot is not None:
+        num_qubits = len(qubit_to_plot)
+    else:
+        num_qubits = len(qubit_set)
+
+    ncols = 3
+    nrows = (num_qubits + ncols - 1) // ncols
+
+    fig, axs = plt.subplots(nrows, ncols, figsize=(4 * ncols, 3 * nrows), squeeze=False)
+    for idx, qubit in enumerate(qubit_to_plot or []):
+        row, col = divmod(idx, ncols)
+        ax = axs[row][col]
+        ydata = np.array(qubit_probs[str(qubit)])
+        A = amplitude[str(qubit)]
+        C = offset[str(qubit)]
+        T_val = T_fit[str(qubit)]
+
+        ax.plot(delays, ydata, "o", label="Measured P(1)", color="blue")
+        t_fit = np.linspace(min(delays), max(delays), 200)
+        fitted_curve = exp_decay(t_fit, A, T_val, C)
+        ax.plot(
+            t_fit,
+            fitted_curve,
+            "--",
+            color="orange",
+            label=f"Fit (T = {T_val * 1e6:.1f} ± {T_fit_err[str(qubit)] * 1e6:.1f} µs)",
+        )
+        tick_list = np.linspace(min(delays), max(delays), 5)
+        ax.set_xticks(tick_list)
+        ax.set_xticklabels([f"{d * 1e6:.0f}" for d in tick_list])
+        ax.set_title(f"Qubit {qubit}")
+        ax.set_xlabel("Delay (µs)")
+        ax.set_ylabel("|1> Population")
+        ax.grid(True)
+        ax.legend()
+
+    for j in range(idx + 1, nrows * ncols):  # pylint: disable=undefined-loop-variable
+        row, col = divmod(j, ncols)
+        fig.delaxes(axs[row][col])
+
+    fig.suptitle(f"{coherence_exp.upper()}_decay_{backend_name}_{timestamp}", fontsize=14)
+    fig.tight_layout(rect=(0, 0.03, 1, 0.95))
+
+    fig_name = f"{coherence_exp}_{backend_name}_{timestamp}.png"
+    plt.close()
+
+    return fig_name, fig
+
+
+def calculate_probabilities(counts: dict[str, int], nqubits: int, coherence_exp: str) -> Tuple[List[float], int]:
+    """
+    Count, for each qubit, how many shots gave the monitored outcome ('1' for 't1', '0' otherwise).
+
+    Args:
+        counts: A dictionary where keys are bitstrings representing measurement outcomes and values are the counts of those outcomes.
+        nqubits: The number of qubits being measured.
+        coherence_exp: A string indicating the coherence experiment type ('t1' or other).
+
+    Returns:
+        tuple: A tuple containing:
+            - A list with the number of occurrences of the monitored outcome for each qubit.
+            - The total number of shots (measurements).
+    """
+    p0_per_qubit = [0.0 for _ in range(nqubits)]
+    total_shots = sum(counts.values())
+    for bitstring, count in counts.items():
+        for q in range(nqubits):
+            if coherence_exp == "t1":
+                if bitstring[::-1][q] == "1":
+                    p0_per_qubit[q] += count
+            else:
+                if bitstring[::-1][q] == "0":
+                    p0_per_qubit[q] += count
+    return p0_per_qubit, total_shots
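A note on the indexing above: Qiskit count keys are little-endian, so bitstring[::-1][q] picks out qubit q with qubit 0 as the right-most character, and for the 't1' experiment the returned list holds the number of '1' outcomes (surviving |1> population) despite the p0_per_qubit name. A small sketch of the bookkeeping, assuming the new module from this release is importable:

    from iqm.benchmarks.coherence.coherence import calculate_probabilities

    counts = {"01": 600, "11": 400}   # right-most bit is qubit 0
    ones, shots = calculate_probabilities(counts, nqubits=2, coherence_exp="t1")
    print(ones, shots)                # [1000.0, 400.0] 1000, i.e. P(1) = 1.0 for qubit 0 and 0.4 for qubit 1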
+
+
+def fit_coherence_model(
+    qubit: int, probs: np.ndarray, delays: np.ndarray, coherence_exp: str
+) -> Tuple[List[BenchmarkObservation], float, float, float, float]:
+    """Fit the coherence model and return observations.
+
+    This function fits a coherence model to the provided probability data
+    and returns the fitted parameters along with their uncertainties.
+
+    Args:
+        qubit: The index of the qubit being analyzed.
+        probs: An array of probability values corresponding to the qubit's coherence.
+        delays: An array of delay times at which the probabilities were measured.
+        coherence_exp: A string indicating the type of coherence experiment ('t1' or 't2_echo').
+
+    Returns:
+        A tuple containing:
+            - A list of BenchmarkObservation objects for the fitted parameters.
+            - The fitted decay time (T_fit).
+            - The uncertainty in the fitted decay time (T_fit_err).
+            - The fitted amplitude (A).
+            - The fitted offset (C).
+    """
+    observations_per_qubit = []
+    ydata = probs
+
+    # Estimate initial parameters
+    A_guess = np.max([ydata[0] - ydata[-1], 0])
+    C_guess = ydata[-1]
+    T_guess = delays[len(delays) // 2]
+    p0 = [A_guess, T_guess, C_guess]
+
+    # Set parameter bounds:
+    # - A must be positive (decay amplitude)
+    # - T must be positive (no negative decay time)
+    # - C between 0 and 1 (physical population values)
+    bounds = ([0, 1e-6, 0], [1.2, 10.0, 1])
+
+    try:
+        popt, pcov = curve_fit(  # pylint: disable=unbalanced-tuple-unpacking
+            exp_decay, delays, ydata, p0=p0, bounds=bounds, maxfev=10000
+        )
+        A, T_fit, C = popt
+        perr = np.sqrt(np.diag(pcov))
+        T_fit_err = perr[1]
+
+    except RuntimeError:
+        A, T_fit, C, T_fit_err = np.nan, np.nan, np.nan, np.nan
+
+    observations_per_qubit.extend(
+        [
+            BenchmarkObservation(
+                name="T1" if coherence_exp == "t1" else "T2_echo",
+                value=T_fit,
+                identifier=BenchmarkObservationIdentifier(qubit),
+                uncertainty=T_fit_err,
+            ),
+        ]
+    )
+    return observations_per_qubit, T_fit, T_fit_err, A, C
+
+
+def coherence_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
+    """Analysis function for a coherence experiment
+
+    Args:
+        run (RunResult): A coherence experiment run for which analysis result is created.
+    Returns:
+        AnalysisResult corresponding to coherence experiment.
+    """
+
+    plots = {}
+    observations: list[BenchmarkObservation] = []
+    dataset = run.dataset.copy(deep=True)
+
+    backend_name = dataset.attrs["backend_name"]
+    timestamp = dataset.attrs["execution_timestamp"]
+    delays = dataset.attrs["delay_list"]
+    coherence_exp = dataset.attrs["experiment"]
+    qubit_set = dataset.attrs["qubit_set"]
+    tot_circs = len(delays)
+    groups = dataset.attrs["group"]
+    all_counts_group: List[Dict[str, int]] = []
+    qubit_probs: Dict[str, List[float]] = {}
+
+    qubits_to_plot = dataset.attrs["qubits_to_plot"]
+    for group in groups:
+        identifier = BenchmarkObservationIdentifier(group)
+        all_counts_group = xrvariable_to_counts(dataset, identifier.string_identifier, tot_circs)
+        nqubits = len(group)
+        qubit_probs.update({str(q): [] for q in group})
+
+        for counts in all_counts_group:
+            p0_per_qubit, total_shots = calculate_probabilities(counts, nqubits, coherence_exp)
+            for q_idx, qubit in enumerate(group):
+                qubit_probs[str(qubit)].append(p0_per_qubit[q_idx] / total_shots)
+
+    qubit_set = [item for sublist in groups for item in sublist]
+    amplitude = {str(qubit): 0.0 for qubit in qubit_set}
+    offset = {str(qubit): 0.0 for qubit in qubit_set}
+    T_fit = {str(qubit): 0.0 for qubit in qubit_set}
+    T_fit_err = {str(qubit): 0.0 for qubit in qubit_set}
+
+    for qubit in qubit_set:
+        qubit_str = str(qubit)
+        probs = np.array(qubit_probs[qubit_str])
+        results = fit_coherence_model(qubit, probs, delays, coherence_exp)
+        observations.extend(results[0])
+        T_fit[qubit_str] = results[1]
+        T_fit_err[qubit_str] = results[2]
+        amplitude[qubit_str] = results[3]
+        offset[qubit_str] = results[4]
+
+    fig_name, fig = plot_coherence(
+        amplitude,
+        backend_name,
+        delays,
+        offset,
+        qubit_set,
+        qubit_probs,
+        timestamp,
+        T_fit,
+        T_fit_err,
+        qubits_to_plot,
+        coherence_exp,
+    )
+    plots[fig_name] = fig
+
+    return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
+
+
+class CoherenceBenchmark(Benchmark):
+    """
+    This benchmark estimates the coherence properties of the qubits and computational resonator.
+    """
+
+    analysis_function = staticmethod(coherence_analysis)
+
+    name: str = "coherence"
+
+    def __init__(self, backend_arg: IQMBackendBase, configuration: "CoherenceConfiguration"):
+        """Construct the CoherenceBenchmark class.
+
+        Args:
+            backend_arg (IQMBackendBase): the backend to execute the benchmark on
+            configuration (CoherenceConfiguration): the configuration of the benchmark
+        """
+        super().__init__(backend_arg, configuration)
+
+        self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name
+        self.delays = configuration.delays
+        self.shots = configuration.shots
+        self.optimize_sqg = configuration.optimize_sqg
+        self.coherence_exp = configuration.coherence_exp
+        self.qiskit_optim_level = configuration.qiskit_optim_level
+        self.qubits_to_plot = configuration.qubits_to_plot
+
+        self.session_timestamp = strftime("%Y%m%d-%H%M%S")
+        self.execution_timestamp = ""
+
+        # Initialize the variable to contain all coherence circuits
+        self.circuits = Circuits()
+        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
+        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")
+
+    def generate_coherence_circuits(
+        self,
+        nqubits: int,
+    ) -> list[QuantumCircuit]:
+        """Generates coherence circuits for the given qubit set and delay times.
+
+        Args:
+            nqubits (int): Number of qubits to apply the coherence circuits on.
+
+        Returns:
+            list[QuantumCircuit]: List of generated coherence circuits.
+        """
+        circuits = []
+        for delay in self.delays:
+            qc = QuantumCircuit(nqubits)
+            if self.coherence_exp == "t1":
+                self._generate_t1_circuits(qc, nqubits, delay)
+            elif self.coherence_exp == "t2_echo":
+                self._generate_t2_echo_circuits(qc, nqubits, delay)
+            qc.measure_all()
+            circuits.append(qc)
+        return circuits
+
+    def _generate_t1_circuits(self, qc: QuantumCircuit, nqubits: int, delay: float):
+        """Generates T1 coherence circuits.
+
+        Args:
+            qc (QuantumCircuit): The quantum circuit to modify.
+            nqubits (int): Number of qubits.
+            delay (float): Delay time for the circuit.
+        """
+
+        for qubit in range(nqubits):
+            qc.x(qubit)
+            qc.delay(int(delay * 1e9), qubit, unit="ns")
+
+    def _generate_t2_echo_circuits(self, qc: QuantumCircuit, nqubits: int, delay: float):
+        """Generates T2 echo coherence circuits.
+
+        Args:
+            qc (QuantumCircuit): The quantum circuit to modify.
+            nqubits (int): Number of qubits.
+            delay (float): Delay time for the circuit.
+        """
+        half_delay = delay / 2
+        for qubit in range(nqubits):
+            qc.h(qubit)
+            qc.delay(int(half_delay * 1e9), qubit, unit="ns")
+            qc.x(qubit)
+            qc.delay(int(half_delay * 1e9), qubit, unit="ns")
+            qc.h(qubit)
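Both generators idle the qubits for the configured delay (given in seconds and converted to nanoseconds here); the echo variant splits the idle time around a central X pulse. A stand-alone Qiskit sketch of a single echo circuit for one delay value (the delay itself is illustrative):

    from qiskit import QuantumCircuit

    delay = 40e-6                                  # seconds, as one entry of the `delays` list
    qc = QuantumCircuit(1)
    qc.h(0)
    qc.delay(int(delay / 2 * 1e9), 0, unit="ns")   # first half of the idle time
    qc.x(0)                                        # refocusing pulse
    qc.delay(int(delay / 2 * 1e9), 0, unit="ns")   # second half
    qc.h(0)
    qc.measure_all()
    print(qc.draw())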
+
+    def add_all_meta_to_dataset(self, dataset: xr.Dataset):
+        """Adds all configuration metadata and circuits to the dataset variable
+
+        Args:
+            dataset (xr.Dataset): The xarray dataset
+        """
+        dataset.attrs["session_timestamp"] = self.session_timestamp
+        dataset.attrs["execution_timestamp"] = self.execution_timestamp
+        dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
+        dataset.attrs["backend_name"] = self.backend.name
+
+        for key, value in self.configuration:
+            if key == "benchmark":  # Avoid saving the class object
+                dataset.attrs[key] = value.name
+            else:
+                dataset.attrs[key] = value
+
+    def checkerboard_groups_from_coupling(self, coupling_map: List[Tuple[int, int]]) -> Tuple[List[int], List[int]]:
+        """
+        Assign Group A and B to qubits based on a checkerboard pattern
+        inferred from the connectivity graph (assumed to be grid-like).
+
+        Args:
+            coupling_map (list of tuple): List of 2-qubit connections (edges).
+
+        Returns:
+            group_a (list): Qubit indices in Group A.
+            group_b (list): Qubit indices in Group B.
+        """
+        G = nx.Graph()
+        G.add_edges_from(coupling_map)
+        if not nx.is_bipartite(G):
+            raise ValueError("The coupling map is not bipartite (not grid-like).")
+        coloring = nx.bipartite.color(G)
+        group_a = [q for q, color in coloring.items() if color == 0]
+        group_b = [q for q, color in coloring.items() if color == 1]
+
+        return group_a, group_b
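The checkerboard split is a two-colouring of the coupling graph, so it works for any bipartite (grid-like) connectivity. A self-contained sketch of the same idea with networkx on a toy 4-qubit square coupling map (the edge list is made up for illustration):

    import networkx as nx

    coupling_map = [(0, 1), (1, 3), (3, 2), (2, 0)]       # a 4-cycle, which is bipartite
    G = nx.Graph()
    G.add_edges_from(coupling_map)
    coloring = nx.bipartite.color(G)
    group_a = [q for q, color in coloring.items() if color == 0]
    group_b = [q for q, color in coloring.items() if color == 1]
    print(group_a, group_b)                               # two interleaved groups, e.g. [1, 2] and [0, 3]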
+
+    def execute(
+        self,
+        backend: IQMBackendBase,
+        # pylint: disable=too-many-branches
+        # pylint: disable=too-many-statements
+    ) -> xr.Dataset:
+        """Executes the benchmark."""
+        self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+
+        dataset = xr.Dataset()
+        self.add_all_meta_to_dataset(dataset)
+
+        self.circuits = Circuits()
+        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
+        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")
+
+        qubit_set = list(range(backend.num_qubits))
+        if self.coherence_exp not in ["t1", "t2_echo"]:
+            raise ValueError("coherence_exp must be either 't1' or 't2_echo'.")
+
+        qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
+        qcvv_logger.setLevel(logging.WARNING)
+
+        if self.backend.has_resonators():
+            qc_coherence = self.generate_coherence_circuits(self.backend.num_qubits)
+            effective_coupling_map = self.backend.coupling_map.reduce(qubit_set)
+            transpilation_params = {
+                "backend": self.backend,
+                "qubits": qubit_set,
+                "coupling_map": effective_coupling_map,
+                "qiskit_optim_level": self.qiskit_optim_level,
+                "optimize_sqg": self.optimize_sqg,
+                "routing_method": self.routing_method,
+            }
+            transpiled_qc_list, _ = perform_backend_transpilation(qc_coherence, **transpilation_params)
+            sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc_list}
+            # Execute on the backend
+            if self.configuration.use_dd is True:
+                raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")
+
+            qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
+            qcvv_logger.setLevel(logging.WARNING)
+
+            jobs, _ = submit_execute(
+                sorted_transpiled_qc_list,
+                self.backend,
+                self.shots,
+                self.calset_id,
+                max_gates_per_batch=self.max_gates_per_batch,
+                max_circuits_per_batch=self.configuration.max_circuits_per_batch,
+                circuit_compilation_options=self.circuit_compilation_options,
+            )
+
+            qcvv_logger.setLevel(logging.INFO)
+            execution_results = retrieve_all_counts(jobs)[0]
+            identifier = BenchmarkObservationIdentifier(qubit_set)
+            dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
+            dataset.attrs.update(
+                {
+                    "qubit_set": qubit_set,
+                    "delay_list": self.delays,
+                    "experiment": self.coherence_exp,
+                    "group": [qubit_set],
+                    "qubits_to_plot": self.qubits_to_plot,
+                }
+            )
+
+        else:
+            # For crystal topology, we use the checkerboard pattern
+            group_a, group_b = self.checkerboard_groups_from_coupling(list(self.backend.coupling_map))
+            for group in [group_a, group_b]:
+                nqubits_group = len(group)
+                qc_coherence = self.generate_coherence_circuits(nqubits_group)
+                transpiled_qc_list = transpile(
+                    qc_coherence, backend=self.backend, initial_layout=group, optimization_level=self.qiskit_optim_level
+                )
+                sorted_transpiled_qc_list = {tuple(group): transpiled_qc_list}
+                # Execute on the backend
+                if self.configuration.use_dd is True:
+                    raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")
+
+                jobs, _ = submit_execute(
+                    sorted_transpiled_qc_list,
+                    self.backend,
+                    self.shots,
+                    self.calset_id,
+                    max_gates_per_batch=self.max_gates_per_batch,
+                    max_circuits_per_batch=self.configuration.max_circuits_per_batch,
+                    circuit_compilation_options=self.circuit_compilation_options,
+                )
+                qcvv_logger.setLevel(logging.INFO)
+                execution_results = retrieve_all_counts(jobs)[0]
+                identifier = BenchmarkObservationIdentifier(group)
+                dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
+
+            dataset.attrs.update(
+                {
+                    "qubit_set": qubit_set,
+                    "delay_list": self.delays,
+                    "experiment": self.coherence_exp,
+                    "group": [group_a, group_b],
+                    "qubits_to_plot": self.qubits_to_plot,
+                }
+            )
+
+        qcvv_logger.debug(f"Adding counts for {self.coherence_exp} to the dataset")
+        self.untranspiled_circuits.circuit_groups.append(CircuitGroup(name=self.coherence_exp, circuits=qc_coherence))
+        self.transpiled_circuits.circuit_groups.append(
+            CircuitGroup(name=self.coherence_exp, circuits=transpiled_qc_list)
+        )
+
+        return dataset
+
+
+class CoherenceConfiguration(BenchmarkConfigurationBase):
+    """Coherence configuration.
+
+    Attributes:
+        benchmark (Type[Benchmark]): The benchmark class used for coherence analysis, defaulting to CoherenceBenchmark.
+        delays (list[float]): List of delay times used in the coherence experiments.
+        qiskit_optim_level (int): Qiskit transpilation optimization level, default is 3.
+        optimize_sqg (bool): Indicates whether Single Qubit Gate Optimization is applied during transpilation, default is True.
+        coherence_exp (str): Specifies the type of coherence experiment, either "t1" or "t2_echo", default is "t1".
+    """
+
+    benchmark: Type[Benchmark] = CoherenceBenchmark
+    delays: list[float]
+    optimize_sqg: bool = True
+    qiskit_optim_level: int = 3
+    coherence_exp: str = "t1"
+    shots: int = 1000
+    qubits_to_plot: list[int]
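Taken together, the new module follows the same configuration-plus-benchmark pattern as the other iqm-benchmarks suites. A hedged usage sketch (the station URL is a placeholder, the remaining BenchmarkConfigurationBase fields are assumed to keep their defaults, and run()/analyze() are assumed to be the standard Benchmark entry points; delays are in seconds and converted to nanoseconds by the circuit generators):

    from iqm.qiskit_iqm import IQMProvider
    from iqm.benchmarks.coherence.coherence import CoherenceBenchmark, CoherenceConfiguration

    backend = IQMProvider("https://example.com/cocos").get_backend()   # placeholder URL

    config = CoherenceConfiguration(
        delays=[10e-6, 20e-6, 50e-6, 100e-6, 200e-6],
        coherence_exp="t1",           # or "t2_echo"
        shots=1000,
        qubits_to_plot=[0, 1, 2],
    )
    benchmark = CoherenceBenchmark(backend, config)
    benchmark.run()
    result = benchmark.analyze()      # T1 (or T2_echo) observations per qubit, plus the decay plots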
iqm/benchmarks/optimization/qscore.py
CHANGED
@@ -422,9 +422,6 @@ def qscore_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
     beta_ratio_list = []
     beta_ratio_std_list = []
     for num_nodes in nodes_list:
-        # Retrieve counts for all the instances within each executed node size.
-        execution_results = xrvariable_to_counts(dataset, num_nodes, num_instances)
-
         # Retrieve other dataset values
         dataset_dictionary = dataset.attrs[num_nodes]
 
@@ -433,18 +430,21 @@ def qscore_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
         qubit_to_node_list = dataset_dictionary["qubit_to_node"]
         virtual_node_list = dataset_dictionary["virtual_nodes"]
         no_edge_instances = dataset_dictionary["no_edge_instances"]
-
         cut_sizes_list = [0.0] * len(no_edge_instances)
+
+        # Retrieve counts for all the instances within each executed node size.
         instances_with_edges = set(range(num_instances)) - set(no_edge_instances)
+        num_instances_with_edges = len(instances_with_edges)
+        execution_results = xrvariable_to_counts(dataset, num_nodes, num_instances_with_edges)
 
-        for inst_idx in list(instances_with_edges):
+        for inst_idx, instance in enumerate(list(instances_with_edges)):
             cut_sizes = run_QAOA(
                 execution_results[inst_idx],
-                graph_list[
-                qubit_to_node_list[
+                graph_list[instance],
+                qubit_to_node_list[instance],
                 use_classically_optimized_angles,
                 num_qaoa_layers,
-                virtual_node_list[
+                virtual_node_list[instance],
             )
             cut_sizes_list.append(cut_sizes)
 
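The counts retrieval was moved after no_edge_instances is known because only instances with edges are executed and stored, so execution_results is indexed by position in that filtered list while the graph-related lists are still indexed by the original instance number. A pure-Python sketch of the resulting double index (the instance numbers are made up):

    num_instances = 5
    no_edge_instances = [1, 4]                     # graphs whose MaxCut circuit is empty
    instances_with_edges = set(range(num_instances)) - set(no_edge_instances)

    for inst_idx, instance in enumerate(list(instances_with_edges)):
        # inst_idx  -> position in execution_results (one entry per executed circuit)
        # instance  -> index into graph_list / qubit_to_node_list / virtual_node_list
        print(inst_idx, "->", instance)            # typically 0 -> 0, 1 -> 2, 2 -> 3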
@@ -770,7 +770,7 @@ class QScoreBenchmark(Benchmark):
         for num_nodes in node_numbers:
             qc_list = []
             qc_transpiled_list: List[QuantumCircuit] = []
-            execution_results = []
+            execution_results: List[Dict[str, int]] = []
             graph_list = []
             qubit_set_list = []
             theta_list = []
@@ -789,6 +789,7 @@ class QScoreBenchmark(Benchmark):
             virtual_node_list = []
             qubit_to_node_list = []
             no_edge_instances = []
+            qc_all = []  # all circuits, including those with no edges
             for instance in range(self.num_instances):
                 qcvv_logger.debug(f"Executing graph {instance} with {num_nodes} nodes.")
                 graph = nx.generators.erdos_renyi_graph(num_nodes, 0.5, seed=seed)
@@ -839,69 +840,64 @@ class QScoreBenchmark(Benchmark):
                 theta_list.append(theta)
 
                 qc = self.generate_maxcut_ansatz(graph, theta)
-                qc_list.append(qc)
-                qubit_to_node_copy = self.qubit_to_node.copy()
-                qubit_to_node_list.append(qubit_to_node_copy)
-
-                if len(qc.count_ops()) == 0:
-                    counts = {"": 1.0}  # to handle the case of physical graph with no edges
-                    qc_transpiled_list.append([])
-                    execution_results.append(counts)
-                    qc_list.append([])
-                    qcvv_logger.debug(f"This graph instance has no edges.")
-                else:
-                    qcvv_logger.setLevel(logging.WARNING)
-                    # Account for all-to-all connected backends like Sirius
-                    if "move" in backend.architecture.gates:
-                        # If the circuit is defined on a subset of qubit_set, choose the first qubtis in the set
-                        active_qubit_set = qubit_set[: len(qc.qubits)]
-                        # All-to-all coupling map on the active qubits
-                        effective_coupling_map = [[x, y] for x in active_qubit_set for y in active_qubit_set if x != y]
-                    else:
-                        if self.choose_qubits_routine == "naive":
-                            active_qubit_set = None
-                            effective_coupling_map = self.backend.coupling_map
-                        else:
-                            active_qubit_set = qubit_set
-                            effective_coupling_map = self.backend.coupling_map.reduce(active_qubit_set)
-
-                    transpilation_params = {
-                        "backend": self.backend,
-                        "qubits": active_qubit_set,
-                        "coupling_map": effective_coupling_map,
-                        "qiskit_optim_level": self.qiskit_optim_level,
-                        "optimize_sqg": self.optimize_sqg,
-                        "routing_method": self.routing_method,
-                    }
 
-
-
-
-
-
-
-
-                        self.shots,
-                        self.calset_id,
-                        max_gates_per_batch=self.max_gates_per_batch,
-                        max_circuits_per_batch=self.configuration.max_circuits_per_batch,
-                        circuit_compilation_options=self.circuit_compilation_options,
-                    )
-                    qc_transpiled_list.append(transpiled_qc)
-                    qcvv_logger.setLevel(logging.INFO)
-
-                    if self.REM:
-                        rem_counts = apply_readout_error_mitigation(
-                            backend, transpiled_qc, [retrieve_all_counts(jobs)[0][0]], self.mit_shots
-                        )
-                        rem_distribution = rem_counts[0][0].nearest_probability_distribution()
-                        execution_results.append(rem_distribution)
-                    else:
-                        execution_results.append(retrieve_all_counts(jobs)[0][0])
+                if len(qc.count_ops()) != 0:
+                    qc_list.append(qc)
+                    qc_all.append(qc)
+                    qubit_to_node_copy = self.qubit_to_node.copy()
+                    qubit_to_node_list.append(qubit_to_node_copy)
+                else:
+                    qc_all.append([])
 
                 seed += 1
                 qcvv_logger.debug(f"Solved the MaxCut on graph {instance+1}/{self.num_instances}.")
 
+            qcvv_logger.setLevel(logging.WARNING)
+            if self.choose_qubits_routine == "naive":
+                active_qubit_set = None
+                effective_coupling_map = self.backend.coupling_map
+            else:
+                active_qubit_set = qubit_set
+                effective_coupling_map = self.backend.coupling_map.reduce(active_qubit_set)
+
+            transpilation_params = {
+                "backend": self.backend,
+                "qubits": active_qubit_set,
+                "coupling_map": effective_coupling_map,
+                "qiskit_optim_level": self.qiskit_optim_level,
+                "optimize_sqg": self.optimize_sqg,
+                "routing_method": self.routing_method,
+            }
+
+            transpiled_qc, _ = perform_backend_transpilation(qc_list, **transpilation_params)
+
+            sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc}
+            # Execute on the backend
+            jobs, _ = submit_execute(
+                sorted_transpiled_qc_list,
+                self.backend,
+                self.shots,
+                self.calset_id,
+                max_gates_per_batch=self.max_gates_per_batch,
+                max_circuits_per_batch=self.configuration.max_circuits_per_batch,
+                circuit_compilation_options=self.circuit_compilation_options,
+            )
+            qc_transpiled_list.append(transpiled_qc)
+            qcvv_logger.setLevel(logging.INFO)
+            instance_with_edges = set(range(self.num_instances)) - set(no_edge_instances)
+            num_instances_with_edges = len(instance_with_edges)
+            if self.REM:
+                rem_counts = apply_readout_error_mitigation(
+                    backend, transpiled_qc, retrieve_all_counts(jobs)[0], self.mit_shots
+                )
+                execution_results.extend(
+                    rem_counts[0][instance].nearest_probability_distribution()
+                    for instance in range(num_instances_with_edges)
+                )
+                # execution_results.append(rem_distribution)
+            else:
+                execution_results.extend(retrieve_all_counts(jobs)[0])
+
             dataset.attrs.update(
                 {
                     num_nodes: {
@@ -918,10 +914,7 @@ class QScoreBenchmark(Benchmark):
 
             qcvv_logger.debug(f"Adding counts for the random graph for {num_nodes} nodes to the dataset")
             dataset, _ = add_counts_to_dataset(execution_results, str(num_nodes), dataset)
-
-            # self.untranspiled_circuits[str(num_nodes)].update({tuple(qubit_set): qc_list})
-            # self.transpiled_circuits[str(num_nodes)].update(sorted_transpiled_qc_list)
-            self.untranspiled_circuits.circuit_groups.append(CircuitGroup(name=str(num_nodes), circuits=qc_list))
+            self.untranspiled_circuits.circuit_groups.append(CircuitGroup(name=str(num_nodes), circuits=qc_all))
             self.transpiled_circuits.circuit_groups.append(
                 CircuitGroup(name=str(num_nodes), circuits=qc_transpiled_list)
             )
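The utils.py hunks below rework the transpilation helper; the part they keep is the aux_qc trick, where a transpiled circuit is pinned onto chosen physical qubits by composing it into a blank circuit of full device width. A stand-alone Qiskit sketch of that mechanism (device size and qubit choice are illustrative):

    from qiskit import QuantumCircuit

    small = QuantumCircuit(2, 2)
    small.h(0)
    small.cx(0, 1)
    small.measure([0, 1], [0, 1])

    qubits = [3, 4]                               # target physical qubits on a 6-qubit device
    aux_qc = QuantumCircuit(6, small.num_clbits)  # blank full-width circuit, as in aux_qc_list
    fixed = aux_qc.compose(small, qubits=qubits, clbits=list(range(small.num_clbits)))
    print(fixed.draw())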
iqm/benchmarks/utils.py
CHANGED
@@ -567,35 +567,31 @@ def perform_backend_transpilation(
     # Helper function considering whether optimize_sqg is done,
     # and whether the coupling map is reduced (whether final physical layout must be fixed onto an auxiliary QC)
     def transpile_and_optimize(qc, aux_qc=None):
-        transpiled = transpile(
-            qc,
-            basis_gates=basis_gates,
-            coupling_map=coupling_map,
-            optimization_level=qiskit_optim_level,
-            initial_layout=qubits if aux_qc is None else None,
-            routing_method=routing_method,
-        )
-        if optimize_sqg:
-            transpiled = optimize_single_qubit_gates(transpiled, drop_final_rz=drop_final_rz)
         if backend.has_resonators():
+            coupling_map_red = (
+                backend.coupling_map.reduce(qubits[: qc.num_qubits]) if aux_qc is not None else coupling_map
+            )
             transpiled = transpile_to_IQM(
-                qc,
+                qc,
+                backend=backend,
+                optimize_single_qubits=optimize_sqg,
+                remove_final_rzs=drop_final_rz,
+                coupling_map=coupling_map_red,
+                # initial_layout=qubits if aux_qc is None else None,
             )
-
-
-
-
-
-
-
-
-
-
-                transpiled, qubits=qubits + [backend.num_qubits], clbits=list(range(qc.num_clbits))
-            )
-        else:
+        else:
+            transpiled = transpile(
+                qc,
+                basis_gates=basis_gates,
+                coupling_map=coupling_map,
+                optimization_level=qiskit_optim_level,
+                initial_layout=qubits if aux_qc is None else None,
+                routing_method=routing_method,
+            )
+        if aux_qc is not None:
             transpiled = aux_qc.compose(transpiled, qubits=qubits, clbits=list(range(qc.num_clbits)))
-
+        if optimize_sqg:
+            transpiled = optimize_single_qubit_gates(transpiled, drop_final_rz=drop_final_rz)
         return transpiled
 
     qcvv_logger.info(
@@ -607,7 +603,7 @@ def perform_backend_transpilation(
         transpiled_qc_list = [transpile_and_optimize(qc) for qc in qc_list]
     else:  # The coupling map will be reduced if the physical layout is to be fixed
         if backend.has_resonators():
-            aux_qc_list = [QuantumCircuit(backend.num_qubits
+            aux_qc_list = [QuantumCircuit(backend.num_qubits, q.num_clbits) for q in qc_list]
         else:
             aux_qc_list = [QuantumCircuit(backend.num_qubits, q.num_clbits) for q in qc_list]
     transpiled_qc_list = [transpile_and_optimize(qc, aux_qc=aux_qc_list[idx]) for idx, qc in enumerate(qc_list)]

{iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: iqm-benchmarks
-Version: 2.
+Version: 2.43
 Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
 Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
 Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks

{iqm_benchmarks-2.41.dist-info → iqm_benchmarks-2.43.dist-info}/RECORD
CHANGED
@@ -4,9 +4,11 @@ iqm/benchmarks/benchmark_definition.py,sha256=e4xe0wlWKZqj48_6-zTglMaMeoiA9aGkHr
 iqm/benchmarks/circuit_containers.py,sha256=anEtZEsodYqOX-34oZRmuKGeEpp_VfgG5045Mz4-4hI,7562
 iqm/benchmarks/logging_config.py,sha256=U7olP5Kr75AcLJqNODf9VBhJLVqIvA4AYR6J39D5rww,1052
 iqm/benchmarks/readout_mitigation.py,sha256=Q2SOGWTNgmklOYkNxepAaSaXlxSj0QQyymYY1bOkT8A,11756
-iqm/benchmarks/utils.py,sha256=
+iqm/benchmarks/utils.py,sha256=sItoMsfUYiMWTSCNOTe_RWi2l1xTf2slvXkFiEMRwKU,41091
 iqm/benchmarks/utils_plots.py,sha256=Q4h7gcKXf8Eizm13P0yL2I_P-QobHVFr9JCV83wrUi8,14942
 iqm/benchmarks/utils_shadows.py,sha256=e77PV_uaAO5m_woox9lAzompKAvFeDJ-0AKJrNJ7NFg,9728
+iqm/benchmarks/coherence/__init__.py,sha256=yeyhk-_Lp8IbJ-f5lQj0HP5Q1HSKK_FzuXHazotUrVY,704
+iqm/benchmarks/coherence/coherence.py,sha256=zX_6A8vCS2zeWesMDXPFZBfrJ8wUG90JI9_tFsonwXk,21191
 iqm/benchmarks/compressive_gst/__init__.py,sha256=LneifgYXtcwo2jcXo7GdUEHL6_peipukShhkrdaTRCA,929
 iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=2kiRttog4jR-vtMHu847GTFe5qL_i_uYr_4WMGqt9Ww,25653
 iqm/benchmarks/compressive_gst/gst_analysis.py,sha256=g0kEovWbetoDRvX7JFrS9oOoNrqBxaFmprujJi7qQbU,36297
@@ -14,7 +16,7 @@ iqm/benchmarks/entanglement/__init__.py,sha256=sHVVToRWRCz0LSntk1rQaoSNNeyZLPoiT
 iqm/benchmarks/entanglement/ghz.py,sha256=12bf9ANfgzyR7Vs8REO-Xm68gisqn8Q7_WSfaNnAmOk,41213
 iqm/benchmarks/entanglement/graph_states.py,sha256=7GMxuhbOeQXc3hn3yAwp51S-F-1qaP0AYXm6JtuL9gA,62560
 iqm/benchmarks/optimization/__init__.py,sha256=_ajW_OibYLCtzU5AUv5c2zuuVYn8ZNeZUcUUSIGt51M,747
-iqm/benchmarks/optimization/qscore.py,sha256=
+iqm/benchmarks/optimization/qscore.py,sha256=D2BVVNAqO32uGu5_kLVl2XJUOBlRl1C-c6zYenaCBMg,37259
 iqm/benchmarks/quantum_volume/__init__.py,sha256=i-Q4SpDWELBw7frXnxm1j4wJRcxbIyrS5uEK_v06YHo,951
 iqm/benchmarks/quantum_volume/clops.py,sha256=fLY0aPHjNbW33SuVM9brAgBYFncDHjY5Bwh6iXzbjiU,31099
 iqm/benchmarks/quantum_volume/quantum_volume.py,sha256=pro7Lz-A5pPpT9UZ8wtXKTyhdWmTjQjRHt4BylDR-3U,36553
@@ -33,7 +35,7 @@ iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py,sha256=sq6MgN_
 iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=TaR1YFWBhOgm1hmEQzuwLYpp0yl0Xpuo3jAT6YhiXpc,28471
 iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py,sha256=jRKbivWCZ3xdO1k0sx-ygC3s5DUkGSModd975PoAtcg,692
 iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=n_5gt9636ZDMsM9hC3Zm5qP2bQr2sy41zxGhOh0XMjI,32932
-iqm_benchmarks-2.
+iqm_benchmarks-2.43.dist-info/licenses/LICENSE,sha256=2Ncb40-hqkTil78RPv3-YiJfKaJ8te9USJgliKqIdSY,11558
 mGST/LICENSE,sha256=TtHNq55cUcbglb7uhVudeBLUh_qPdUoAEvU0BBwFz-k,1098
 mGST/README.md,sha256=v_5kw253csHF4-RfE-44KqFmBXIsSMRmOtN0AUPrRxE,5050
 mGST/additional_fns.py,sha256=_SEJ10FRNM7_CroysT8hCLZTfpm6ZhEIDCY5zPTnhjo,31390
@@ -44,7 +46,7 @@ mGST/optimization.py,sha256=YHwkzIkYvsZOPjclR-BCQWh24jeqjuXp0BB0WX5Lwow,10559
 mGST/qiskit_interface.py,sha256=ajx6Zn5FnrX_T7tMP8xnBLyG4c2ddFRm0Fu2_3r1t30,10118
 mGST/reporting/figure_gen.py,sha256=6Xd8vwfy09hLY1YbJY6TRevuMsQSU4MsWqemly3ZO0I,12970
 mGST/reporting/reporting.py,sha256=B8NWfpZrrSmyH7lwZxd0EbZMYLsAGK1YsHRB4D5qXH4,26002
-iqm_benchmarks-2.
-iqm_benchmarks-2.
-iqm_benchmarks-2.
-iqm_benchmarks-2.
+iqm_benchmarks-2.43.dist-info/METADATA,sha256=ezkJGO1628fD-X8lXDqtZUeEK9y7kIsfZjDw-JqZBzs,10872
+iqm_benchmarks-2.43.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+iqm_benchmarks-2.43.dist-info/top_level.txt,sha256=3G23Z-1LGf-IOzTCUl6QwWqiQ3USz25Zt90Ihq192to,9
+iqm_benchmarks-2.43.dist-info/RECORD,,

File without changes
File without changes
File without changes