iqm-benchmarks 2.42__py3-none-any.whl → 2.44__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iqm/benchmarks/coherence/__init__.py +18 -0
- iqm/benchmarks/coherence/coherence.py +548 -0
- iqm/benchmarks/entanglement/ghz.py +3 -3
- iqm/benchmarks/entanglement/graph_states.py +21 -21
- iqm/benchmarks/quantum_volume/clops.py +2 -3
- iqm/benchmarks/randomized_benchmarking/eplg/eplg.py +23 -24
- iqm/benchmarks/utils.py +72 -8
- iqm/benchmarks/utils_plots.py +309 -65
- {iqm_benchmarks-2.42.dist-info → iqm_benchmarks-2.44.dist-info}/METADATA +1 -1
- {iqm_benchmarks-2.42.dist-info → iqm_benchmarks-2.44.dist-info}/RECORD +13 -11
- {iqm_benchmarks-2.42.dist-info → iqm_benchmarks-2.44.dist-info}/WHEEL +0 -0
- {iqm_benchmarks-2.42.dist-info → iqm_benchmarks-2.44.dist-info}/licenses/LICENSE +0 -0
- {iqm_benchmarks-2.42.dist-info → iqm_benchmarks-2.44.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# Copyright 2024 IQM Benchmarks developers
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
"""
|
|
15
|
+
Estimation of the coherence properties of the qubits and computational resonator.
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
from . import coherence
|
|
@@ -0,0 +1,548 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Coherence benchmark
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from time import strftime
|
|
7
|
+
from typing import Dict, List, Tuple, Type
|
|
8
|
+
|
|
9
|
+
from matplotlib.figure import Figure
|
|
10
|
+
import matplotlib.pyplot as plt
|
|
11
|
+
import networkx as nx
|
|
12
|
+
import numpy as np
|
|
13
|
+
from qiskit import QuantumCircuit, transpile
|
|
14
|
+
from scipy.optimize import curve_fit
|
|
15
|
+
import xarray as xr
|
|
16
|
+
|
|
17
|
+
from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
|
|
18
|
+
from iqm.benchmarks.benchmark_definition import (
|
|
19
|
+
Benchmark,
|
|
20
|
+
BenchmarkAnalysisResult,
|
|
21
|
+
BenchmarkObservation,
|
|
22
|
+
BenchmarkObservationIdentifier,
|
|
23
|
+
BenchmarkRunResult,
|
|
24
|
+
add_counts_to_dataset,
|
|
25
|
+
)
|
|
26
|
+
from iqm.benchmarks.circuit_containers import BenchmarkCircuit, CircuitGroup, Circuits
|
|
27
|
+
from iqm.benchmarks.logging_config import qcvv_logger
|
|
28
|
+
from iqm.benchmarks.utils import ( # execute_with_dd,
|
|
29
|
+
perform_backend_transpilation,
|
|
30
|
+
retrieve_all_counts,
|
|
31
|
+
submit_execute,
|
|
32
|
+
xrvariable_to_counts,
|
|
33
|
+
)
|
|
34
|
+
from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def exp_decay(t, A, T, C):
    """Single-exponential decay model evaluated at time ``t``.

    Implements ``A * exp(-t / T) + C``, the standard relaxation curve used
    to fit T1/T2 coherence data.

    Args:
        t: Time value(s) at which to evaluate the model (float or array-like).
        A: Decay amplitude at ``t = 0``, relative to the offset.
        T: Characteristic decay time constant.
        C: Constant baseline offset.

    Returns:
        The model value(s), matching the shape of ``t``.
    """
    decay_factor = np.exp(-t / T)
    return C + A * decay_factor
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def plot_coherence(
    amplitude: Dict[str, float],
    backend_name: str,
    delays: List[float],
    offset: Dict[str, float],
    qubit_set: List[int],
    qubit_probs: dict[str, List[float]],
    timestamp: str,
    T_fit: Dict[str, float],
    T_fit_err: Dict[str, float],
    qubit_to_plot: List[int] | None = None,
    coherence_exp: str = "t1",
) -> Tuple[str, Figure]:
    """
    Plot coherence decay (T1 or T2_echo) for each qubit as subplots.

    Args:
        amplitude: Fitted amplitudes (A) per qubit, keyed by qubit index as a string.
        backend_name: Name of the backend used for the experiment.
        delays: List of delay times (in seconds) used in the coherence experiments.
        offset: Fitted offsets (C) for each qubit, keyed by qubit index as a string.
        qubit_set: List of qubit indices involved in the experiment.
        qubit_probs: Measured probabilities P(1) for each qubit at different delays.
        timestamp: Timestamp for labeling the plot.
        T_fit: Fitted coherence time (T) for each qubit.
        T_fit_err: Fitted coherence time error for each qubit.
        qubit_to_plot: Specific qubits to plot. If None, all qubits in `qubit_set` are plotted.
        coherence_exp: Type of coherence experiment ('t1' or 't2_echo') for labeling and plotting logic.

    Returns:
        Tuple[str, Figure]: Filename of the saved plot and the matplotlib figure object.
    """
    # Fall back to the full qubit set when no explicit subset is requested.
    # (Previously, passing None produced an empty figure: only `qubit_to_plot`
    # was iterated while the grid was sized from `qubit_set`, and the cleanup
    # loop then read an unbound `idx`.)
    qubits = qubit_to_plot if qubit_to_plot is not None else qubit_set
    num_qubits = len(qubits)

    ncols = 3
    nrows = (num_qubits + ncols - 1) // ncols  # ceiling division

    fig, axs = plt.subplots(nrows, ncols, figsize=(4 * ncols, 3 * nrows), squeeze=False)
    last_idx = -1  # Last populated subplot index; safe default if `qubits` is empty.
    for last_idx, qubit in enumerate(qubits):
        row, col = divmod(last_idx, ncols)
        ax = axs[row][col]
        ydata = np.array(qubit_probs[str(qubit)])
        A = amplitude[str(qubit)]
        C = offset[str(qubit)]
        T_val = T_fit[str(qubit)]

        ax.plot(delays, ydata, "o", label="Measured P(1)", color="blue")
        # Dense time grid for a smooth rendering of the fitted decay curve.
        t_fit = np.linspace(min(delays), max(delays), 200)
        fitted_curve = exp_decay(t_fit, A, T_val, C)
        ax.plot(
            t_fit,
            fitted_curve,
            "--",
            color="orange",
            label=f"Fit (T = {T_val * 1e6:.1f} ± {T_fit_err[str(qubit)] * 1e6:.1f} µs)",
        )
        # Axis data is in seconds; tick labels are rendered in microseconds.
        tick_list = np.linspace(min(delays), max(delays), 5)
        ax.set_xticks(tick_list)
        ax.set_xticklabels([f"{d * 1e6:.0f}" for d in tick_list])
        ax.set_title(f"Qubit {qubit}")
        ax.set_xlabel("Delay (µs)")
        ax.set_ylabel("|1> Population")  # typo fix: was "Populatiuon"
        ax.grid(True)
        ax.legend()

    # Remove any unused trailing axes of the subplot grid.
    for j in range(last_idx + 1, nrows * ncols):
        row, col = divmod(j, ncols)
        fig.delaxes(axs[row][col])

    fig.suptitle(f"{coherence_exp.upper()}_decay_{backend_name}_{timestamp}", fontsize=14)
    fig.tight_layout(rect=(0, 0.03, 1, 0.95))

    fig_name = f"{coherence_exp}_{backend_name}_{timestamp}.png"
    # Close this specific figure so it is not left open in pyplot's registry.
    plt.close(fig)

    return fig_name, fig
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def calculate_probabilities(counts: dict[str, int], nqubits: int, coherence_exp: str) -> Tuple[List[float], int]:
    """
    Count per-qubit occurrences of the tracked measurement outcome.

    For a T1 experiment the qubit is prepared in |1>, so the tracked outcome
    is '1'; for any other experiment (T2 echo) the sequence refocuses back to
    |0>, so the tracked outcome is '0'.  (The previous docstring incorrectly
    stated that '0' was always counted.)

    Args:
        counts: A dictionary where keys are bitstrings representing measurement outcomes and values are the counts of those outcomes.
            Bitstrings follow Qiskit ordering: qubit 0 is the rightmost character.
        nqubits: The number of qubits being measured.
        coherence_exp: A string indicating the coherence experiment type ('t1' or other).

    Returns:
        tuple: A tuple containing:
            - A list of occurrences of the tracked outcome for each qubit.
            - The total number of shots (measurements).
    """
    # The outcome whose population is later fitted with the decay model.
    target_bit = "1" if coherence_exp == "t1" else "0"
    occurrences = [0.0 for _ in range(nqubits)]
    total_shots = sum(counts.values())
    for bitstring, count in counts.items():
        # Reverse once per bitstring (not once per qubit) so that index q
        # addresses qubit q directly.
        little_endian = bitstring[::-1]
        for q in range(nqubits):
            if little_endian[q] == target_bit:
                occurrences[q] += count
    return occurrences, total_shots
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def fit_coherence_model(
    qubit: int, probs: np.ndarray, delays: np.ndarray, coherence_exp: str
) -> Tuple[List[BenchmarkObservation], float, float, float, float]:
    """Fit the coherence model and return observations.

    This function fits a single-exponential decay to the provided probability
    data and returns the fitted parameters along with their uncertainties.

    Args:
        qubit: The index of the qubit being analyzed.
        probs: An array of probability values corresponding to the qubit's coherence.
        delays: An array of delay times (in seconds) at which the probabilities were measured.
        coherence_exp: A string indicating the type of coherence experiment ('t1' or 't2_echo');
            only affects the name attached to the reported observation.

    Returns:
        A tuple containing:
            - A list with one BenchmarkObservation for the fitted decay time.
            - The fitted decay time (T_fit).
            - The uncertainty in the fitted decay time (T_fit_err).
            - The fitted amplitude (A).
            - The fitted offset (C).
        All fit values are NaN when the fit fails.
    """
    observations_per_qubit: List[BenchmarkObservation] = []
    ydata = probs

    # Estimate initial parameters: amplitude from the first-to-last drop
    # (clamped non-negative), offset from the tail, decay time from the
    # middle of the delay range.
    A_guess = np.max([ydata[0] - ydata[-1], 0])
    C_guess = ydata[-1]
    T_guess = delays[len(delays) // 2]
    p0 = [A_guess, T_guess, C_guess]

    # Set parameter bounds:
    # - A must be positive (decay amplitude), small headroom above 1
    # - T must be positive (no negative decay time); 10 s upper bound
    # - C between 0 and 1 (physical population values)
    bounds = ([0, 1e-6, 0], [1.2, 10.0, 1])

    try:
        popt, pcov = curve_fit(  # pylint: disable=unbalanced-tuple-unpacking
            exp_decay, delays, ydata, p0=p0, bounds=bounds, maxfev=10000
        )
        A, T_fit, C = popt
        perr = np.sqrt(np.diag(pcov))
        T_fit_err = perr[1]

    except (RuntimeError, ValueError):
        # RuntimeError: the fit did not converge. ValueError: invalid input
        # data (e.g. NaN probabilities) — previously uncaught, which aborted
        # the whole analysis. Either way, report NaN for this qubit instead.
        A, T_fit, C, T_fit_err = np.nan, np.nan, np.nan, np.nan

    observations_per_qubit.append(
        BenchmarkObservation(
            name="T1" if coherence_exp == "t1" else "T2_echo",
            value=T_fit,
            identifier=BenchmarkObservationIdentifier(qubit),
            uncertainty=T_fit_err,
        )
    )
    return observations_per_qubit, T_fit, T_fit_err, A, C
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def coherence_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
    """Analysis function for a coherence experiment

    Extracts per-qubit outcome probabilities from the stored counts, fits an
    exponential decay per qubit, and produces a summary plot.

    Args:
        run (RunResult): A coherence experiment run for which analysis result is created.
    Returns:
        AnalysisResult corresponding to coherence experiment.
    """

    plots = {}
    observations: list[BenchmarkObservation] = []
    dataset = run.dataset.copy(deep=True)

    # Metadata written by CoherenceBenchmark.execute.
    backend_name = dataset.attrs["backend_name"]
    timestamp = dataset.attrs["execution_timestamp"]
    delays = dataset.attrs["delay_list"]
    coherence_exp = dataset.attrs["experiment"]
    qubit_set = dataset.attrs["qubit_set"]
    tot_circs = len(delays)  # one circuit per delay value
    groups = dataset.attrs["group"]
    all_counts_group: List[Dict[str, int]] = []
    qubit_probs: Dict[str, List[float]] = {}

    qubits_to_plot = dataset.attrs["qubits_to_plot"]
    # Collect, for each qubit, the tracked-outcome probability at every delay.
    # Counts are stored per group (one group for a star layout, two
    # checkerboard groups otherwise).
    for group in groups:
        identifier = BenchmarkObservationIdentifier(group)
        all_counts_group = xrvariable_to_counts(dataset, identifier.string_identifier, tot_circs)
        nqubits = len(group)
        qubit_probs.update({str(q): [] for q in group})

        for counts in all_counts_group:
            p0_per_qubit, total_shots = calculate_probabilities(counts, nqubits, coherence_exp)
            for q_idx, qubit in enumerate(group):
                qubit_probs[str(qubit)].append(p0_per_qubit[q_idx] / total_shots)

    # Re-derive the qubit set as the flattened group list so it matches the
    # order in which probabilities were collected above (overrides the
    # "qubit_set" attribute read earlier).
    qubit_set = [item for sublist in groups for item in sublist]
    amplitude = {str(qubit): 0.0 for qubit in qubit_set}
    offset = {str(qubit): 0.0 for qubit in qubit_set}
    T_fit = {str(qubit): 0.0 for qubit in qubit_set}
    T_fit_err = {str(qubit): 0.0 for qubit in qubit_set}

    # Fit the exponential decay model qubit by qubit; each fit contributes
    # one observation (T1 or T2_echo) plus plotting parameters.
    for qubit in qubit_set:
        qubit_str = str(qubit)
        probs = np.array(qubit_probs[qubit_str])
        results = fit_coherence_model(qubit, probs, delays, coherence_exp)
        observations.extend(results[0])
        T_fit[qubit_str] = results[1]
        T_fit_err[qubit_str] = results[2]
        amplitude[qubit_str] = results[3]
        offset[qubit_str] = results[4]

    fig_name, fig = plot_coherence(
        amplitude,
        backend_name,
        delays,
        offset,
        qubit_set,
        qubit_probs,
        timestamp,
        T_fit,
        T_fit_err,
        qubits_to_plot,
        coherence_exp,
    )
    plots[fig_name] = fig

    return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
class CoherenceBenchmark(Benchmark):
    """
    This benchmark estimates the coherence properties of the qubits and computational resonator.

    T1 circuits prepare |1> and wait; T2-echo circuits apply a Hahn-echo
    sequence (H - delay/2 - X - delay/2 - H) before measurement.
    """

    analysis_function = staticmethod(coherence_analysis)

    # Benchmark identifier used in datasets and result naming.
    name: str = "coherence"

    def __init__(self, backend_arg: IQMBackendBase, configuration: "CoherenceConfiguration"):
        """Construct the CoherenceBenchmark class.

        Args:
            backend_arg (IQMBackendBase): the backend to execute the benchmark on
            configuration (CoherenceConfiguration): the configuration of the benchmark
        """
        super().__init__(backend_arg, configuration)

        self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name
        self.delays = configuration.delays
        self.shots = configuration.shots
        self.optimize_sqg = configuration.optimize_sqg
        self.coherence_exp = configuration.coherence_exp
        self.qiskit_optim_level = configuration.qiskit_optim_level
        self.qubits_to_plot = configuration.qubits_to_plot

        self.session_timestamp = strftime("%Y%m%d-%H%M%S")
        self.execution_timestamp = ""

        # Initialize the variable to contain all coherence circuits
        self.circuits = Circuits()
        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")

    def generate_coherence_circuits(
        self,
        nqubits: int,
    ) -> list[QuantumCircuit]:
        """Generates coherence circuits for the given qubit set and delay times.

        One circuit is created per configured delay; the circuit type (T1 or
        T2 echo) is selected by ``self.coherence_exp``.

        Args:
            nqubits (int): Number of qubits to apply the coherence circuits on.

        Returns:
            list[QuantumCircuit]: List of generated coherence circuits.
        """
        circuits = []
        for delay in self.delays:
            qc = QuantumCircuit(nqubits)
            if self.coherence_exp == "t1":
                self._generate_t1_circuits(qc, nqubits, delay)
            elif self.coherence_exp == "t2_echo":
                self._generate_t2_echo_circuits(qc, nqubits, delay)
            qc.measure_all()
            circuits.append(qc)
        return circuits

    def _generate_t1_circuits(self, qc: QuantumCircuit, nqubits: int, delay: float):
        """Generates T1 coherence circuits.

        Prepares each qubit in |1> with an X gate, then idles for the given
        delay before measurement.

        Args:
            qc (QuantumCircuit): The quantum circuit to modify.
            nqubits (int): Number of qubits.
            delay (float): Delay time for the circuit, in seconds (converted to ns below).
        """

        for qubit in range(nqubits):
            qc.x(qubit)
            qc.delay(int(delay * 1e9), qubit, unit="ns")

    def _generate_t2_echo_circuits(self, qc: QuantumCircuit, nqubits: int, delay: float):
        """Generates T2 echo coherence circuits.

        Hahn-echo sequence: H - delay/2 - X - delay/2 - H, which refocuses
        quasi-static dephasing before measurement.

        Args:
            qc (QuantumCircuit): The quantum circuit to modify.
            nqubits (int): Number of qubits.
            delay (float): Total delay time for the circuit, in seconds (split in two halves).
        """
        half_delay = delay / 2
        for qubit in range(nqubits):
            qc.h(qubit)
            qc.delay(int(half_delay * 1e9), qubit, unit="ns")
            qc.x(qubit)
            qc.delay(int(half_delay * 1e9), qubit, unit="ns")
            qc.h(qubit)

    def add_all_meta_to_dataset(self, dataset: xr.Dataset):
        """Adds all configuration metadata and circuits to the dataset variable

        Args:
            dataset (xr.Dataset): The xarray dataset
        """
        dataset.attrs["session_timestamp"] = self.session_timestamp
        dataset.attrs["execution_timestamp"] = self.execution_timestamp
        dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
        dataset.attrs["backend_name"] = self.backend.name

        for key, value in self.configuration:
            if key == "benchmark":  # Avoid saving the class object
                dataset.attrs[key] = value.name
            else:
                dataset.attrs[key] = value

    def checkerboard_groups_from_coupling(self, coupling_map: List[Tuple[int, int]]) -> Tuple[List[int], List[int]]:
        """
        Assign Group A and B to qubits based on a checkerboard pattern
        inferred from the connectivity graph (assumed to be grid-like).

        Splitting into two non-adjacent groups allows running the coherence
        circuits on half the device at a time.

        Args:
            coupling_map (list of tuple): List of 2-qubit connections (edges).

        Returns:
            group_a (list): Qubit indices in Group A.
            group_b (list): Qubit indices in Group B.

        Raises:
            ValueError: If the connectivity graph is not bipartite.
        """
        G = nx.Graph()
        G.add_edges_from(coupling_map)
        if not nx.is_bipartite(G):
            raise ValueError("The coupling map is not bipartite (not grid-like).")
        coloring = nx.bipartite.color(G)
        group_a = [q for q, color in coloring.items() if color == 0]
        group_b = [q for q, color in coloring.items() if color == 1]

        return group_a, group_b

    def execute(
        self,
        backend: IQMBackendBase,
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
    ) -> xr.Dataset:
        """Executes the benchmark."""
        self.execution_timestamp = strftime("%Y%m%d-%H%M%S")

        dataset = xr.Dataset()
        self.add_all_meta_to_dataset(dataset)

        # Reset circuit containers so repeated execute() calls do not accumulate.
        self.circuits = Circuits()
        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")

        qubit_set = list(range(backend.num_qubits))
        if self.coherence_exp not in ["t1", "t2_echo"]:
            raise ValueError("coherence_exp must be either 't1' or 't2_echo'.")

        qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
        qcvv_logger.setLevel(logging.WARNING)

        if self.backend.has_resonators():
            # Star-like topology: all qubits can be run in a single batch.
            qc_coherence = self.generate_coherence_circuits(self.backend.num_qubits)
            effective_coupling_map = self.backend.coupling_map.reduce(qubit_set)
            transpilation_params = {
                "backend": self.backend,
                "qubits": qubit_set,
                "coupling_map": effective_coupling_map,
                "qiskit_optim_level": self.qiskit_optim_level,
                "optimize_sqg": self.optimize_sqg,
                "routing_method": self.routing_method,
            }
            transpiled_qc_list, _ = perform_backend_transpilation(qc_coherence, **transpilation_params)
            sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc_list}
            # Execute on the backend
            if self.configuration.use_dd is True:
                # DD inserts pulses into idle periods, which would corrupt the decay measurement.
                raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")

            qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
            qcvv_logger.setLevel(logging.WARNING)

            jobs, _ = submit_execute(
                sorted_transpiled_qc_list,
                self.backend,
                self.shots,
                self.calset_id,
                max_gates_per_batch=self.max_gates_per_batch,
                max_circuits_per_batch=self.configuration.max_circuits_per_batch,
                circuit_compilation_options=self.circuit_compilation_options,
            )

            qcvv_logger.setLevel(logging.INFO)
            execution_results = retrieve_all_counts(jobs)[0]
            identifier = BenchmarkObservationIdentifier(qubit_set)
            dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
            dataset.attrs.update(
                {
                    "qubit_set": qubit_set,
                    "delay_list": self.delays,
                    "experiment": self.coherence_exp,
                    "group": [qubit_set],
                    "qubits_to_plot": self.qubits_to_plot,
                }
            )

        else:
            # For crystal topology, we use the checkerboard pattern
            group_a, group_b = self.checkerboard_groups_from_coupling(list(self.backend.coupling_map))
            for group in [group_a, group_b]:
                nqubits_group = len(group)
                qc_coherence = self.generate_coherence_circuits(nqubits_group)
                transpiled_qc_list = transpile(
                    qc_coherence, backend=self.backend, initial_layout=group, optimization_level=self.qiskit_optim_level
                )
                sorted_transpiled_qc_list = {tuple(group): transpiled_qc_list}
                # Execute on the backend
                if self.configuration.use_dd is True:
                    # DD inserts pulses into idle periods, which would corrupt the decay measurement.
                    raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")

                jobs, _ = submit_execute(
                    sorted_transpiled_qc_list,
                    self.backend,
                    self.shots,
                    self.calset_id,
                    max_gates_per_batch=self.max_gates_per_batch,
                    max_circuits_per_batch=self.configuration.max_circuits_per_batch,
                    circuit_compilation_options=self.circuit_compilation_options,
                )
                qcvv_logger.setLevel(logging.INFO)
                execution_results = retrieve_all_counts(jobs)[0]
                identifier = BenchmarkObservationIdentifier(group)
                dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)

            dataset.attrs.update(
                {
                    "qubit_set": qubit_set,
                    "delay_list": self.delays,
                    "experiment": self.coherence_exp,
                    "group": [group_a, group_b],
                    "qubits_to_plot": self.qubits_to_plot,
                }
            )

        qcvv_logger.debug(f"Adding counts for {self.coherence_exp} to the dataset")
        # NOTE(review): only the last generated/transpiled batch is recorded
        # here — in the checkerboard branch the group_a circuits are not kept;
        # confirm this is intended.
        self.untranspiled_circuits.circuit_groups.append(CircuitGroup(name=self.coherence_exp, circuits=qc_coherence))
        self.transpiled_circuits.circuit_groups.append(
            CircuitGroup(name=self.coherence_exp, circuits=transpiled_qc_list)
        )

        return dataset
|
|
529
|
+
|
|
530
|
+
|
|
531
|
+
class CoherenceConfiguration(BenchmarkConfigurationBase):
    """Coherence configuration.

    Attributes:
        benchmark (Type[Benchmark]): The benchmark class used for coherence analysis, defaulting to CoherenceBenchmark.
        delays (list[float]): List of delay times (in seconds) used in the coherence experiments.
        optimize_sqg (bool): Indicates whether Single Qubit Gate Optimization is applied during transpilation, default is True.
        qiskit_optim_level (int): Qiskit transpilation optimization level, default is 3.
        coherence_exp (str): Specifies the type of coherence experiment, either "t1" or "t2_echo", default is "t1".
        shots (int): Number of measurement shots per circuit, default is 1000.
        qubits_to_plot (list[int]): Qubit indices whose decay curves are rendered in the analysis plot.
    """

    benchmark: Type[Benchmark] = CoherenceBenchmark
    # Delay times in seconds (converted to ns when circuits are built).
    delays: list[float]
    optimize_sqg: bool = True
    qiskit_optim_level: int = 3
    # Must be "t1" or "t2_echo"; validated in CoherenceBenchmark.execute.
    coherence_exp: str = "t1"
    shots: int = 1000
    qubits_to_plot: list[int]
|
|
@@ -145,7 +145,6 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int], circui
|
|
|
145
145
|
phases = [np.pi * i / (num_qubits + 1) for i in range(2 * num_qubits + 2)]
|
|
146
146
|
idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
|
|
147
147
|
transpiled_circuits = circuits["transpiled_circuits"]
|
|
148
|
-
num_shots = dataset.attrs["shots"]
|
|
149
148
|
num_circuits = len(transpiled_circuits[f"{qubit_layout}_native_ghz"].circuits)
|
|
150
149
|
|
|
151
150
|
# Computing the phase acquired by the |11...1> component for each interval
|
|
@@ -155,8 +154,9 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int], circui
|
|
|
155
154
|
counts = xrvariable_to_counts(dataset, f"{idx}", num_circuits)
|
|
156
155
|
all_zero_probability_list = [] # An ordered list for storing the probabilities of returning to the |00..0> state
|
|
157
156
|
for count in counts[1:]:
|
|
157
|
+
normalization = np.sum(list(count.values()))
|
|
158
158
|
if "0" * num_qubits in count.keys():
|
|
159
|
-
probability = count["0" * num_qubits] /
|
|
159
|
+
probability = count["0" * num_qubits] / normalization
|
|
160
160
|
else:
|
|
161
161
|
probability = 0
|
|
162
162
|
all_zero_probability_list.append(probability)
|
|
@@ -165,7 +165,7 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int], circui
|
|
|
165
165
|
i_n = np.abs(np.dot(complex_coefficients, np.array(all_zero_probability_list))) / (len(phases))
|
|
166
166
|
|
|
167
167
|
# Extracting the probabilities of the 00...0 and 11...1 bit strings
|
|
168
|
-
probs_direct = {label: count /
|
|
168
|
+
probs_direct = {label: count / np.sum(list(counts[0].values())) for label, count in counts[0].items()}
|
|
169
169
|
|
|
170
170
|
# Computing GHZ state fidelity from i_n and the probabilities according to the method in [Mooney, 2021]
|
|
171
171
|
p0 = probs_direct["0" * num_qubits] if "0" * num_qubits in probs_direct.keys() else 0
|
|
@@ -439,34 +439,29 @@ def plot_max_negativities_graph(
|
|
|
439
439
|
fig = plt.figure()
|
|
440
440
|
ax = plt.axes()
|
|
441
441
|
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
else:
|
|
446
|
-
graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
|
|
447
|
-
qubit_positions = GraphPositions.create_positions(graph_backend)
|
|
448
|
-
else:
|
|
449
|
-
graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
|
|
450
|
-
if num_qubits in (20, 7):
|
|
451
|
-
station = "garnet" if num_qubits == 20 else "deneb"
|
|
452
|
-
qubit_positions = GraphPositions.predefined_stations[station]
|
|
453
|
-
else:
|
|
454
|
-
qubit_positions = GraphPositions.create_positions(graph_backend)
|
|
442
|
+
qubit_positions = GraphPositions.get_positions(
|
|
443
|
+
station=station, graph=backend_coupling_map.graph.to_undirected(multigraph=False), num_qubits=num_qubits
|
|
444
|
+
)
|
|
455
445
|
|
|
456
446
|
# Normalize negativity values to the range [0, 1] for color mapping
|
|
457
447
|
norm = plt.Normalize(vmin=cast(float, min(negativity_values)), vmax=cast(float, max(negativity_values)))
|
|
458
|
-
edge_colors = [
|
|
448
|
+
edge_colors = [
|
|
449
|
+
cmap(norm(negativity_edges[edge])) if edge in qubit_pairs else "lightgray" for edge in backend_coupling_map
|
|
450
|
+
] #
|
|
451
|
+
nodes = list(set(v for edge in backend_coupling_map for v in edge))
|
|
452
|
+
active_nodes = list(set(v for edge in qubit_pairs for v in edge))
|
|
453
|
+
node_colors = ["lightgray" if v not in active_nodes else "k" for v in nodes]
|
|
459
454
|
|
|
460
455
|
nx.draw_networkx(
|
|
461
456
|
rx_to_nx_graph(backend_coupling_map),
|
|
462
457
|
pos=qubit_positions,
|
|
463
|
-
nodelist=
|
|
464
|
-
|
|
458
|
+
nodelist=nodes,
|
|
459
|
+
edgelist=list(backend_coupling_map),
|
|
460
|
+
labels={x: qubit_names[x] for x in nodes},
|
|
465
461
|
font_size=6.5,
|
|
466
|
-
edgelist=qubit_pairs,
|
|
467
462
|
width=4.0,
|
|
468
463
|
edge_color=edge_colors,
|
|
469
|
-
node_color=
|
|
464
|
+
node_color=node_colors,
|
|
470
465
|
font_color="w",
|
|
471
466
|
ax=ax,
|
|
472
467
|
)
|
|
@@ -490,6 +485,8 @@ def plot_max_negativities_graph(
|
|
|
490
485
|
f"{shots_string}; Bootstraps: {num_bootstraps}"
|
|
491
486
|
f"\n{timestamp}"
|
|
492
487
|
)
|
|
488
|
+
# Invert y-axis to match the intended qubit positions
|
|
489
|
+
plt.gca().invert_yaxis()
|
|
493
490
|
plt.close()
|
|
494
491
|
|
|
495
492
|
return fig_name, fig
|
|
@@ -955,7 +952,7 @@ def negativity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
|
|
|
955
952
|
dataset = run.dataset.copy(deep=True)
|
|
956
953
|
qcvv_logger.info("Dataset imported OK")
|
|
957
954
|
backend_name = dataset.attrs["backend_name"]
|
|
958
|
-
|
|
955
|
+
coupling_map_full = dataset.attrs["coupling_map_full"]
|
|
959
956
|
qubit_names = dataset.attrs["qubit_names"]
|
|
960
957
|
execution_timestamp = dataset.attrs["execution_timestamp"]
|
|
961
958
|
tomography = dataset.attrs["tomography"]
|
|
@@ -1004,7 +1001,7 @@ def negativity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
|
|
|
1004
1001
|
|
|
1005
1002
|
fig_name, fig = plot_max_negativities_graph(
|
|
1006
1003
|
negativities=max_negativities,
|
|
1007
|
-
backend_coupling_map=
|
|
1004
|
+
backend_coupling_map=coupling_map_full,
|
|
1008
1005
|
qubit_names=qubit_names,
|
|
1009
1006
|
timestamp=execution_timestamp,
|
|
1010
1007
|
tomography=tomography,
|
|
@@ -1066,8 +1063,11 @@ class GraphStateBenchmark(Benchmark):
|
|
|
1066
1063
|
dataset.attrs["execution_timestamp"] = self.execution_timestamp
|
|
1067
1064
|
dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
|
|
1068
1065
|
dataset.attrs["backend_name"] = self.backend.name
|
|
1069
|
-
dataset.attrs["qubit_names"] = {
|
|
1066
|
+
dataset.attrs["qubit_names"] = {
|
|
1067
|
+
qubit: self.backend.index_to_qubit_name(qubit) for qubit in np.arange(self.backend.num_qubits)
|
|
1068
|
+
}
|
|
1070
1069
|
dataset.attrs["coupling_map"] = self.coupling_map
|
|
1070
|
+
dataset.attrs["coupling_map_full"] = self.backend.coupling_map
|
|
1071
1071
|
|
|
1072
1072
|
for key, value in self.configuration:
|
|
1073
1073
|
if key == "benchmark": # Avoid saving the class object
|