iqm-benchmarks 2.27__py3-none-any.whl → 2.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of iqm-benchmarks might be problematic. Click here for more details.
- iqm/benchmarks/__init__.py +2 -0
- iqm/benchmarks/entanglement/__init__.py +2 -1
- iqm/benchmarks/entanglement/graph_states.py +1350 -0
- iqm/benchmarks/quantum_volume/quantum_volume.py +3 -6
- iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +33 -8
- iqm/benchmarks/utils.py +276 -194
- iqm/benchmarks/utils_plots.py +232 -0
- iqm/benchmarks/utils_shadows.py +228 -0
- {iqm_benchmarks-2.27.dist-info → iqm_benchmarks-2.29.dist-info}/METADATA +2 -1
- {iqm_benchmarks-2.27.dist-info → iqm_benchmarks-2.29.dist-info}/RECORD +13 -10
- {iqm_benchmarks-2.27.dist-info → iqm_benchmarks-2.29.dist-info}/WHEEL +1 -1
- {iqm_benchmarks-2.27.dist-info → iqm_benchmarks-2.29.dist-info}/licenses/LICENSE +0 -0
- {iqm_benchmarks-2.27.dist-info → iqm_benchmarks-2.29.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1350 @@
|
|
|
1
|
+
# Copyright 2025 IQM Benchmarks developers
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
# pylint: disable=too-many-lines
|
|
16
|
+
|
|
17
|
+
"""
|
|
18
|
+
Graph states benchmark
|
|
19
|
+
"""
|
|
20
|
+
from collections import defaultdict
|
|
21
|
+
import itertools
|
|
22
|
+
from time import strftime
|
|
23
|
+
from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple, Type, cast
|
|
24
|
+
|
|
25
|
+
from matplotlib.figure import Figure
|
|
26
|
+
import matplotlib.pyplot as plt
|
|
27
|
+
import networkx as nx
|
|
28
|
+
import numpy as np
|
|
29
|
+
from qiskit import QuantumCircuit, transpile
|
|
30
|
+
from qiskit.transpiler import CouplingMap
|
|
31
|
+
import xarray as xr
|
|
32
|
+
|
|
33
|
+
from iqm.benchmarks import Benchmark, BenchmarkCircuit, BenchmarkRunResult, CircuitGroup, Circuits
|
|
34
|
+
from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
|
|
35
|
+
from iqm.benchmarks.benchmark_definition import (
|
|
36
|
+
BenchmarkAnalysisResult,
|
|
37
|
+
BenchmarkObservation,
|
|
38
|
+
BenchmarkObservationIdentifier,
|
|
39
|
+
add_counts_to_dataset,
|
|
40
|
+
)
|
|
41
|
+
from iqm.benchmarks.logging_config import qcvv_logger
|
|
42
|
+
from iqm.benchmarks.randomized_benchmarking.randomized_benchmarking_common import import_native_gate_cliffords
|
|
43
|
+
from iqm.benchmarks.utils import ( # marginal_distribution, perform_backend_transpilation,
|
|
44
|
+
bootstrap_counts,
|
|
45
|
+
generate_state_tomography_circuits,
|
|
46
|
+
get_neighbors_of_edges,
|
|
47
|
+
get_Pauli_expectation,
|
|
48
|
+
get_tomography_matrix,
|
|
49
|
+
median_with_uncertainty,
|
|
50
|
+
remove_directed_duplicates_to_list,
|
|
51
|
+
retrieve_all_counts,
|
|
52
|
+
retrieve_all_job_metadata,
|
|
53
|
+
set_coupling_map,
|
|
54
|
+
split_sequence_in_chunks,
|
|
55
|
+
submit_execute,
|
|
56
|
+
timeit,
|
|
57
|
+
xrvariable_to_counts,
|
|
58
|
+
)
|
|
59
|
+
from iqm.benchmarks.utils_plots import GraphPositions, rx_to_nx_graph
|
|
60
|
+
from iqm.benchmarks.utils_shadows import get_local_shadow, get_negativity, local_shadow_tomography
|
|
61
|
+
from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def find_edges_with_disjoint_neighbors(
    graph: Sequence[Sequence[int]],
) -> List[List[Sequence[int]]]:
    """Finds sets of edges with non-overlapping neighboring nodes.

    Edges are greedily grouped into "iterations": within one iteration no two
    edges share a node, and no edge touches a neighbor of another chosen edge,
    so the corresponding pairwise operations are fully isolated from each other.

    Args:
        graph (Sequence[Sequence[int]]): The input graph specified as a sequence of edges (Sequence[int]).
    Returns:
        List[List[Tuple[int]]]: A list of lists of edges (Tuple[int]) from the original graph with non-overlapping neighboring nodes.
    """
    # Normalize edges to tuples: edges are stored in sets below, which requires
    # hashable elements. This also accepts list-typed edges (per the declared
    # Sequence[Sequence[int]] input), which would otherwise raise TypeError.
    edges = [tuple(edge) for edge in graph]

    # Build adjacency list representation of the graph
    adjacency = defaultdict(set)
    for u, v in edges:
        adjacency[u].add(v)
        adjacency[v].add(u)

    def get_edge_neighbors(edge):
        """Return all nodes adjacent to either endpoint of ``edge``, excluding the endpoints."""
        u, v = edge
        return (adjacency[u] | adjacency[v]) - {u, v}

    remaining_edges = set(edges)  # Keep track of remaining edges
    iterations = []  # Store the edges chosen in each iteration

    while remaining_edges:
        current_iteration = set()  # Edges chosen in this iteration
        used_nodes = set()  # Nodes already used in this iteration

        for edge in list(remaining_edges):
            u, v = edge
            # Skip edges that share a node with an already chosen edge
            if u in used_nodes or v in used_nodes:
                continue

            # Skip edges whose neighborhood overlaps nodes of already chosen edges
            edge_neighbors = get_edge_neighbors(edge)
            if any(neighbor in used_nodes for neighbor in edge_neighbors):
                continue

            # Accept the edge into the current iteration
            current_iteration.add(edge)
            used_nodes.update([u, v])

        # Record this iteration and drop its edges from the pool
        iterations.append(list(current_iteration))
        remaining_edges -= current_iteration

    return iterations
def generate_minimal_edge_layers(cp_map: CouplingMap) -> Dict[int, List[List[int]]]:
    """Sorts the edges of a coupling map, arranging them in a dictionary with values being subsets of the coupling map with no overlapping nodes.

    Each item will correspond to a layer of pairs of qubits in which parallel 2Q gates can be applied.

    Args:
        cp_map (CouplingMap): A list of lists of pairs of integers, representing a coupling map.
    Returns:
        Dict[int, List[List[int]]]: A dictionary with values being subsets of the coupling map with no overlapping nodes.
    """
    # Work with the undirected version of the coupling map so each physical
    # coupler appears exactly once.
    edges = remove_directed_duplicates_to_list(cp_map)
    num_edges = len(edges)

    # Conflict graph: each edge is a node; two edges conflict when they share a qubit.
    conflicts: Dict[int, Set] = {idx: set() for idx in range(num_edges)}
    for a in range(num_edges):
        for b in range(a + 1, num_edges):
            if set(edges[a]) & set(edges[b]):
                conflicts[a].add(b)
                conflicts[b].add(a)

    # Greedy coloring of the conflict graph: every color class is a layer of
    # mutually node-disjoint edges; always pick the smallest free color to
    # keep the number of layers small.
    colors: Dict[int, int] = {}
    for node in range(num_edges):
        taken = {colors[other] for other in conflicts[node] if other in colors}
        candidate = 0
        while candidate in taken:
            candidate += 1
        colors[node] = candidate

    # Bucket the original edges by their assigned color.
    layers: Dict[int, List[List[int]]] = {}
    for idx, color in colors.items():
        layers.setdefault(color, []).append(edges[idx])

    return layers
def generate_graph_state(qubits: Sequence[int], backend: IQMBackendBase | str) -> QuantumCircuit:
    """Generates a circuit with minimal depth preparing a native graph state for a given backend using given qubits.

    Args:
        qubits (Sequence[int]): A list of integers representing the qubits.
        backend (IQMBackendBase): The backend to target the graph state generating circuit.
    Returns:
        QuantumCircuit: The circuit generating a graph state in the target backend.
    """
    n = len(qubits)
    circuit = QuantumCircuit(n)
    coupling_map = set_coupling_map(qubits, backend, physical_layout="fixed")
    parallel_layers = generate_minimal_edge_layers(coupling_map)

    # Prepare every qubit in superposition with an r(pi/2, pi/2) rotation
    # (Hadamard-like preparation on the native gate set).
    for qubit in range(n):
        circuit.r(np.pi / 2, np.pi / 2, qubit)

    # Entangle along the couplers, layer by layer, so that all CZ gates within
    # a layer act on disjoint qubit pairs and can run in parallel.
    for layer in parallel_layers.values():
        for control, target in layer:
            circuit.cz(control, target)

    # Map the circuit onto the requested physical qubits of the target backend.
    return transpile(circuit, backend=backend, initial_layout=qubits, optimization_level=3)
def plot_density_matrix(
    matrix: np.ndarray,
    qubit_pair: Sequence[int],
    projection: str,
    negativity: Dict[str, float],
    backend_name: str,
    timestamp: str,
    tomography: Literal["state_tomography", "shadow_tomography"],
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots a density matrix for corresponding qubit pairs, neighbor qubit projections, and negativities.

    Produces a two-panel figure (real and imaginary parts side by side), with each
    matrix cell annotated by its numeric value, and a title summarizing the
    projection, negativity, backend and timestamp.

    Args:
        matrix (np.ndarray): The matrix to plot.
        qubit_pair (Sequence[int]): The corresponding qubit pair.
        projection (str): The projection corresponding to the matrix to plot.
        negativity (Dict[str, float]): A dictionary with keys "value" and "uncertainty" and values being respective negativities.
        backend_name (str): The name of the backend for the corresponding experiment.
        timestamp (str): The timestamp for the corresponding experiment.
        tomography (Literal["state_tomography", "shadow_tomography"]): The type of tomography used to gather the data of the matrix to plot.
        num_RM_samples (Optional[int] = None): The number of randomized measurement samples if tomography is shadow_tomography.
            * Default is None if tomography is state_tomography.
        num_MoMs_samples (Optional[int] = None): The number of Median of Means used per randomized measurement if tomography is shadow_tomography.
            * Default is None if tomography is state_tomography.
    Returns:
        Tuple[str, Figure]: The figure label and the density matrix plot figure.
    """

    fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(6, 6))
    cmap = "winter_r"
    # The figure is keyed by the qubit pair it depicts.
    fig_name = str(qubit_pair)

    # Real part: color limits are symmetric about zero so sign is visible.
    ax[0].matshow(matrix.real, interpolation="nearest", vmin=-np.max(matrix.real), vmax=np.max(matrix.real), cmap=cmap)
    ax[0].set_title(r"$\mathrm{Re}(\hat{\rho})$")
    # Annotate every cell with its value to 2 decimal places.
    for (i, j), z in np.ndenumerate(matrix.real):
        ax[0].text(
            j,
            i,
            f"{z:0.2f}",
            ha="center",
            va="center",
            bbox={"boxstyle": "round", "facecolor": "white", "edgecolor": "0.3"},
        )

    # Imaginary part. NOTE(review): the color limits reuse the real-part maximum,
    # so both panels (and the single colorbar below) share one scale — presumably
    # intentional; confirm if independent scaling is ever needed.
    im1 = ax[1].matshow(
        matrix.imag, interpolation="nearest", vmin=-np.max(matrix.real), vmax=np.max(matrix.real), cmap=cmap
    )
    ax[1].set_title(r"$\mathrm{Im}(\hat{\rho})$")
    for (i, j), z in np.ndenumerate(matrix.imag):
        ax[1].text(
            j,
            i,
            f"{z:0.2f}",
            ha="center",
            va="center",
            bbox={"boxstyle": "round", "facecolor": "white", "edgecolor": "0.3"},
        )

    # Title contents depend on the tomography method used to obtain the matrix.
    if tomography == "shadow_tomography":
        fig.suptitle(
            f"Average shadow for qubits {qubit_pair} ({num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples)\n"
            f"Projection: {projection}\nNegativity: {negativity['value']:.4f} +/- {negativity['uncertainty']:.4f}\n"
            f"{backend_name} --- {timestamp}"
        )
    else:
        fig.suptitle(
            f"Tomographically reconstructed density matrix for qubits {qubit_pair}\n"
            f"Projection: {projection}\nNegativity: {negativity['value']:.4f} +/- {negativity['uncertainty']:.4f}\n"
            f"{backend_name} --- {timestamp}"
        )
    # One shared colorbar, keyed to the imaginary-part mappable (same scale as real).
    fig.colorbar(im1, shrink=0.5)
    fig.tight_layout(rect=(0, 0.03, 1, 1.25))

    # Close the figure so it is not implicitly rendered; the caller decides
    # when/where to display or save it.
    plt.close()

    return fig_name, fig
def plot_max_negativities(
    negativities: Dict[str, Dict[str, str | float]],
    backend_name: str,
    qubit_names: Dict[int, str],
    timestamp: str,
    tomography: Literal["shadow_tomography", "state_tomography"],
    num_shots: int,
    num_bootstraps: Optional[int] = None,
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots the maximum negativity for each corresponding pair of qubits.

    Args:
        negativities (Dict[str, Dict[str, str | float]]): A dictionary (str qubit keys) of dictionaries (keys "value"/"uncertainty") of negativities (float) to plot.
        backend_name (str): The name of the backend for the corresponding experiment.
        qubit_names (Dict[int, str]): A dictionary of qubit names corresponding to qubit indices.
        timestamp (str): The timestamp of the corresponding experiment.
        tomography (Literal["shadow_tomography", "state_tomography"]): The type of tomography that was used.
        num_shots (int): The number of shots used in the corresponding experiment.
        num_bootstraps (Optional[int]): The number of bootstraps used if tomography corresponds to state tomography.
            * Defaults to None if the tomography type is "shadow_tomography".
        num_RM_samples (Optional[int]): The number of randomized measurement samples used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "state_tomography".
        num_MoMs_samples (Optional[int]): The number of Median of Means samples per randomized measurement used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "shadow_tomography".

    Returns:
        Tuple[str, Figure]: The figure label and the max negativities plot figure.
    """
    fig_name = f"max_negativities_{backend_name}_{timestamp}".replace(" ", "_")
    # Sort the negativities by value (ascending) so the plot reads low-to-high.
    sorted_negativities = dict(sorted(negativities.items(), key=lambda item: item[1]["value"]))

    # Keys look like "(a, b)"; turn them into "a-b" strings.
    x = [x.replace("(", "").replace(")", "").replace(", ", "-") for x in list(sorted_negativities.keys())]
    # Re-label each pair using the backend's qubit names; the [2:] slice drops a
    # 2-character prefix of the qubit name (assumes names like "QB12" — TODO confirm).
    x_updated = [
        f"{cast(str, qubit_names[int(a)])[2:]}-{cast(str, qubit_names[int(b)])[2:]}"
        for edge in x
        for a, b in [edge.split("-")]
    ]  ## reindexes the edges label as in the QPU graph.

    y = [a["value"] for a in sorted_negativities.values()]
    yerr = [a["uncertainty"] for a in sorted_negativities.values()]

    cmap = plt.get_cmap("winter")

    fig = plt.figure()
    ax = plt.axes()

    # Error-bar legend label reflects how uncertainties were estimated.
    if tomography == "shadow_tomography":
        errorbar_labels = rf"$1 \mathrm{{SEM}}$ (N={cast(int, num_RM_samples)*cast(int,num_MoMs_samples)} RMs)"
    else:
        errorbar_labels = rf"$1 \sigma$ ({cast(int, num_bootstraps)} bootstraps)"

    plt.errorbar(
        x_updated,
        y,
        yerr=yerr,
        capsize=2,
        color=cmap(0.15),
        fmt="o",
        alpha=1,
        mec="black",
        markersize=3,
        label=errorbar_labels,
    )
    # Reference line at 0.5, the maximal negativity for a two-qubit state.
    plt.axhline(0.5, color=cmap(1.0), linestyle="dashed")

    ax.set_xlabel("Qubit pair")
    ax.set_ylabel("Negativity")

    # Major y-ticks every 0.1, minor ticks every 0.05
    major_ticks = np.arange(0, 0.5, 0.1)
    minor_ticks = np.arange(-0.05, 0.55, 0.05)
    ax.set_yticks(major_ticks)
    ax.set_yticks(minor_ticks, minor=True)
    ax.grid(which="both")

    # Expand the y-range only when error bars would spill outside [0, 0.5];
    # note yerr[0]/yerr[-1] are the uncertainties of the min/max values since
    # the data is sorted by value.
    lower_y = np.min(y) - 1.75 * float(yerr[0]) - 0.02 if np.min(y) - float(yerr[0]) < 0 else -0.01
    upper_y = np.max(y) + 1.75 * float(yerr[-1]) + 0.02 if np.max(y) + float(yerr[-1]) > 0.5 else 0.51
    ax.set_ylim(
        (
            lower_y,
            upper_y,
        )
    )

    plt.xticks(rotation=90)
    if tomography == "shadow_tomography":
        plt.title(
            f"Max entanglement negativities for qubit pairs in {backend_name}\n{num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples\n{timestamp}"
        )
    else:
        plt.title(
            f"Max entanglement negativities for qubit pairs in {backend_name}\nShots per tomography sample: {num_shots}; Bootstraps: {num_bootstraps}\n{timestamp}"
        )
    plt.legend(fontsize=8)

    ax.margins(tight=True)

    # For few pairs a fixed aspect keeps markers readable; for many pairs,
    # grow the figure width so x tick labels do not overlap.
    if len(x) <= 40:
        ax.set_aspect((2 / 3) * len(x))
        ax.autoscale(enable=True, axis="x")
    else:
        ####################################################################################
        # Solution to fix tick spacings taken from:
        # https://stackoverflow.com/questions/44863375/how-to-change-spacing-between-ticks
        plt.gca().margins(x=0.01)
        plt.gcf().canvas.draw()
        tl = plt.gca().get_xticklabels()
        maxsize = max(t.get_window_extent().width for t in tl)
        m = 0.2  # inch margin
        s = maxsize / plt.gcf().dpi * len(x) + 2 * m
        margin = m / plt.gcf().get_size_inches()[0]
        plt.gcf().subplots_adjust(left=margin, right=1.0 - margin)
        plt.gcf().set_size_inches(s, plt.gcf().get_size_inches()[1])
        #####################################################################################

    # Close so the figure is not implicitly rendered; caller decides when to show it.
    plt.close()

    return fig_name, fig
def plot_max_negativities_graph(
    negativities: Dict[str, Dict[str, str | float]],
    backend_coupling_map: CouplingMap,
    qubit_names: Dict[int, str],
    timestamp: str,
    tomography: Literal["shadow_tomography", "state_tomography"],
    station: Optional[str] = None,
    num_shots: Optional[int] = None,
    num_bootstraps: Optional[int] = None,
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots the maximum negativity for each corresponding pair of qubits in a graph layout of the given backend.

    Args:
        negativities (Dict[str, Dict[str, str | float]]): A dictionary (str qubit keys) of dictionaries (keys "value"/"uncertainty") of negativities (float) to plot.
        backend_coupling_map (CouplingMap): The CouplingMap instance.
        qubit_names (Dict[int, str]): A dictionary of qubit names corresponding to qubit indices.
        timestamp (str): The timestamp of the corresponding experiment.
        tomography (Literal["shadow_tomography", "state_tomography"]): The type of tomography that was used.
        station (str): The name of the station to use for the graph layout.
        num_shots (Optional[int]): The number of shots used in the corresponding experiment.
            * Defaults to None: won't be displayed in title.
        num_bootstraps (Optional[int]): The number of bootstraps used if tomography corresponds to state tomography.
            * Defaults to None if the tomography type is "shadow_tomography".
        num_RM_samples (Optional[int]): The number of randomized measurement samples used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "state_tomography".
        num_MoMs_samples (Optional[int]): The number of Median of Means samples per randomized measurement used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "shadow_tomography".

    Returns:
        Tuple[str, Figure]: The figure label and the max negativities plot figure.
    """
    num_qubits = len(qubit_names.keys())
    fig_name = (
        f"max_negativities_graph_{station}_{timestamp}"
        if station is not None
        else f"max_negativities_graph_{timestamp}"
    )
    # Sort the negativities by value
    sorted_negativities = dict(sorted(negativities.items(), key=lambda item: item[1]["value"]))

    # Parse keys of the form "(a, b)" back into integer pairs.
    qubit_pairs = [
        tuple(int(num) for num in x.replace("(", "").replace(")", "").replace("...", "").split(", "))
        for x in sorted_negativities.keys()
    ]
    negativity_values = [a["value"] for a in sorted_negativities.values()]

    negativity_edges = dict(zip(qubit_pairs, negativity_values))

    # Use the module-level colormap accessor: matplotlib.cm.get_cmap was
    # deprecated in Matplotlib 3.7 and removed in 3.9. This also matches
    # plot_max_negativities, which already uses plt.get_cmap.
    cmap = plt.get_cmap("winter")

    fig = plt.figure()
    ax = plt.axes()

    # Pick node positions: predefined per-station layouts when known, otherwise
    # compute positions from the (undirected) coupling graph.
    if station is not None:
        if station.lower() in GraphPositions.predefined_stations:
            qubit_positions = GraphPositions.predefined_stations[station.lower()]
        else:
            graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
            qubit_positions = GraphPositions.create_positions(graph_backend)
    else:
        graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
        # Heuristic: 20 qubits -> garnet layout, 7 qubits -> deneb layout.
        if num_qubits in (20, 7):
            station = "garnet" if num_qubits == 20 else "deneb"
            qubit_positions = GraphPositions.predefined_stations[station]
        else:
            qubit_positions = GraphPositions.create_positions(graph_backend)

    # Normalize negativity values to the range [0, 1] for color mapping
    norm = plt.Normalize(vmin=cast(float, min(negativity_values)), vmax=cast(float, max(negativity_values)))
    edge_colors = [cmap(norm(negativity_edges[edge])) for edge in qubit_pairs]

    nx.draw_networkx(
        rx_to_nx_graph(backend_coupling_map),
        pos=qubit_positions,
        nodelist=list(range(num_qubits)),
        labels={x: qubit_names[x] for x in range(num_qubits)},
        font_size=6.5,
        edgelist=qubit_pairs,
        width=4.0,
        edge_color=edge_colors,
        node_color="k",
        font_color="w",
        ax=ax,
    )

    # Add colorbar
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax, shrink=0.5)

    shots_string = "" if num_shots is None else f"Shots per tomography sample: {num_shots}"
    station_string = "IQM Backend" if station is None else station.capitalize()
    if tomography == "shadow_tomography":
        plt.title(
            f"Max entanglement negativities for qubit pairs in {station_string}\n"
            f"{num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples\n"
            f"{shots_string}; {timestamp}"
        )
    else:
        plt.title(
            f"Max entanglement negativities for qubit pairs in {station_string}\n"
            f"{shots_string}; Bootstraps: {num_bootstraps}"
            f"\n{timestamp}"
        )
    # Close so the figure is not implicitly rendered; caller decides when to show it.
    plt.close()

    return fig_name, fig
def update_pauli_expectations(
    pauli_expectations: Dict[str, Dict[str, float]],
    projected_counts: Dict[str, Dict[str, int]],
    nonId_pauli_label: str,
) -> Dict[str, Dict[str, float]]:
    """Helper function that updates the input Pauli expectations dictionary of dictionaries (projections -> {pauli string: expectation}).

    Args:
        pauli_expectations (Dict[str, Dict[str, float]]): The Pauli expectations dictionary of dictionaries to update.
            * Outermost keys are projected bitstrings; innermost are pauli strings and values are expectation values.
        projected_counts (Dict[str, Dict[str, int]]): The corresponding projected counts dictionary of dictionaries.
        nonId_pauli_label (str): The Pauli label to update expectations of, that should not contain identities.
            * Pauli expectations corresponding to I are inferred and updated from counts corresponding to strings containing Z instead.

    Returns:
        Dict[str, Dict[str, float]]: The updated Pauli expectations dictionary of dictionaries (projections -> {pauli string: expectation}).
    """
    first, second = nonId_pauli_label[0], nonId_pauli_label[1]

    # Labels whose expectations are recoverable from a measurement in this basis:
    # the label itself, plus any identity-containing labels obtained by replacing
    # a Z with I (identity expectations are marginals of Z-basis counts).
    derivable_labels = [nonId_pauli_label]
    if nonId_pauli_label == "ZZ":
        derivable_labels += ["ZI", "IZ", "II"]
    if first == "Z":
        derivable_labels.append(f"I{second}")
    if second == "Z":
        derivable_labels.append(f"{first}I")

    for bit_string, counts in projected_counts.items():
        # Ideally the counts should be labeled by Pauli basis measurement!
        # Here by construction they should be ordered as all_pauli_labels,
        # however, this assumed that measurements never got scrambled (which should not happen anyway).
        pauli_expectations[bit_string].update(
            {label: get_Pauli_expectation(counts, label) for label in derivable_labels}
        )

    return pauli_expectations
def shadow_tomography_analysis(
    dataset: xr.Dataset,
    all_qubit_pairs_per_group: Dict[int, List[Tuple[int, int]]],
    all_qubit_neighbors_per_group: Dict[int, List[List[int]]],
    all_unprojected_qubits: Dict[int, List[int]],
    backend_name: str,
    execution_timestamp: str,
) -> Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
    """
    Performs shadow tomography analysis on the given dataset.

    For each qubit pair, classical shadows are reconstructed per neighbor-projection
    bitstring, averaged over randomized measurements, and aggregated with a
    median-of-means (MoMs) estimator; the projection with maximal entanglement
    negativity is reported per pair.

    Args:
        dataset (xr.Dataset): The dataset containing the experimental data.
        all_qubit_pairs_per_group (Dict[int, List[Tuple[int, int]]]): Dictionary mapping group indices to lists of qubit pairs.
        all_qubit_neighbors_per_group (Dict[int, List[List[int]]]): Dictionary mapping group indices to lists of neighbor qubit groups.
        all_unprojected_qubits (Dict[int, List[int]]): Dictionary mapping group indices to lists of unprojected qubits.
        backend_name (str): The name of the backend used for the experiment.
        execution_timestamp (str): The timestamp of the experiment execution.

    Returns:
        Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
            A tuple containing:
            - A dictionary of plots.
            - A list of benchmark observations.
            - A dictionary of maximum negativities.
            - The updated dataset.
    """
    plots: Dict[str, Any] = {}
    observations: List[BenchmarkObservation] = []
    max_negativities: Dict[str, Dict[str, str | float]] = {}

    execution_results = {}

    # Number of randomized measurements and of median-of-means samples per pair.
    num_RMs = dataset.attrs["n_random_unitaries"]
    num_MoMs = dataset.attrs["n_median_of_means"]

    qcvv_logger.info("Fetching Clifford dictionary")
    clifford_1q_dict = import_native_gate_cliffords("1q")
    all_unitaries = dataset.attrs["all_unitaries"]

    shadows_per_projection: Dict[str, Dict[int, Dict[str, List[np.ndarray]]]] = {}
    # shadows_per_projection: qubit_pair -> MoMs -> {Projection, List of shadows}
    MoMs_shadows: Dict[str, Dict[str, np.ndarray]] = {}
    # MoMs_shadows: qubit_pair -> {Projection: MoMs shadow}
    average_shadows_per_projection: Dict[str, Dict[int, Dict[str, np.ndarray]]] = {}
    # average_shadows_per_projection: qubit_pair -> MoMs -> {Projection: shadows}
    all_negativities: Dict[str, Dict[int, Dict[str, float]]] = {}
    # all_negativities: qubit_pair -> MoMs -> {Projection: Negativity}
    MoMs_negativities: Dict[str, Dict[str, Dict[str, float]]] = {}
    for group_idx, group in all_qubit_pairs_per_group.items():
        qcvv_logger.info(f"Retrieving shadows for qubit-pair group {group_idx+1}/{len(all_qubit_pairs_per_group)}")
        # Assume only pairs and nearest-neighbors were measured, and each pair in the group uses num_RMs randomized measurements:
        execution_results[group_idx] = xrvariable_to_counts(
            dataset, str(all_unprojected_qubits[group_idx]), num_RMs * num_MoMs * len(group)
        )

        # Counts are laid out pair-major: num_RMs * num_MoMs consecutive entries per pair.
        partitioned_counts_MoMs_RMs = split_sequence_in_chunks(execution_results[group_idx], num_RMs * num_MoMs)
        partitioned_counts_RMs = {}

        for pair_idx, qubit_pair in enumerate(group):
            all_negativities[str(qubit_pair)] = {}
            MoMs_negativities[str(qubit_pair)] = {}
            shadows_per_projection[str(qubit_pair)] = {}
            average_shadows_per_projection[str(qubit_pair)] = {}

            # Further split this pair's counts into num_MoMs chunks of num_RMs each.
            partitioned_counts_RMs[pair_idx] = split_sequence_in_chunks(partitioned_counts_MoMs_RMs[pair_idx], num_RMs)

            # Get the neighbor qubits of qubit_pair
            neighbor_qubits = all_qubit_neighbors_per_group[group_idx][pair_idx]
            neighbor_bit_strings_length = len(neighbor_qubits)
            # Generate all possible projection bitstrings for the neighbors, {'0','1'}^{\otimes{N}}
            all_projection_bit_strings = [
                "".join(x) for x in itertools.product(("0", "1"), repeat=neighbor_bit_strings_length)
            ]

            for MoMs in range(num_MoMs):
                qcvv_logger.info(
                    f"Now on qubit pair {qubit_pair} ({pair_idx+1}/{len(group)}) and median of means sample {MoMs+1}/{num_MoMs}"
                )

                # Get all shadows of qubit_pair
                shadows_per_projection[str(qubit_pair)][MoMs] = {
                    projection: [] for projection in all_projection_bit_strings
                }
                for RM_idx, counts in enumerate(partitioned_counts_RMs[pair_idx][MoMs]):
                    # Retrieve both Cliffords (i.e. for each qubit)
                    cliffords_rm = [all_unitaries[group_idx][MoMs][str(q)][RM_idx] for q in qubit_pair]
                    # Organize counts by projection
                    # e.g. counts ~ {'000 00': 31, '000 01': 31, '000 10': 38, '000 11': 41, '001 00': 28, '001 01': 33,
                    #                '001 10': 31, '001 11': 37, '010 00': 29, '010 01': 32, '010 10': 31, '010 11': 25,
                    #                '011 00': 36, '011 01': 24, '011 10': 33, '011 11': 32, '100 00': 22, '100 01': 38,
                    #                '100 10': 34, '100 11': 26, '101 00': 26, '101 01': 26, '101 10': 37, '101 11': 30,
                    #                '110 00': 36, '110 01': 35, '110 10': 31, '110 11': 35, '111 00': 31, '111 01': 32,
                    #                '111 10': 37, '111 11': 36}
                    # organize to projected_counts['000'] ~ {'00': 31, '01': 31, '10': 38, '11': 41},
                    # projected_counts['001'] ~ {'00': 28, '01': 33, '10': 31, '11': 37}
                    # ...
                    # NOTE(review): b_s[-2:] assumes the pair occupies the last two characters
                    # of each outcome bitstring and that the pair is a 2-qubit system — confirm
                    # against the circuit's classical register ordering.
                    projected_counts = {
                        projection: {
                            b_s[-2:]: b_c
                            for b_s, b_c in counts.items()
                            if b_s[:neighbor_bit_strings_length] == projection
                        }
                        for projection in all_projection_bit_strings
                    }

                    # Get the individual shadow for each projection
                    for projected_bit_string in all_projection_bit_strings:
                        shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string].append(
                            get_local_shadow(
                                counts=projected_counts[projected_bit_string],
                                unitary_arg=cliffords_rm,
                                subsystem_bit_indices=list(range(2)),
                                clifford_or_haar="clifford",
                                cliffords_1q=clifford_1q_dict,
                            )
                        )

                # Average the shadows for each projection and MoMs sample
                average_shadows_per_projection[str(qubit_pair)][MoMs] = {
                    projected_bit_string: np.mean(
                        shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string], axis=0
                    )
                    for projected_bit_string in all_projection_bit_strings
                }

                # Compute the negativity of the shadow of each projection
                qcvv_logger.info(
                    f"Computing the negativity of all shadow projections for qubit pair {qubit_pair} ({pair_idx+1}/{len(group)}) and median of means sample {MoMs+1}/{num_MoMs}"
                )
                all_negativities[str(qubit_pair)][MoMs] = {
                    projected_bit_string: get_negativity(
                        average_shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string], 1, 1
                    )
                    for projected_bit_string in all_projection_bit_strings
                }

            # Median over the MoMs samples, with an uncertainty estimate, per projection.
            MoMs_negativities[str(qubit_pair)] = {
                projected_bit_string: median_with_uncertainty(
                    [all_negativities[str(qubit_pair)][m][projected_bit_string] for m in range(num_MoMs)]
                )
                for projected_bit_string in all_projection_bit_strings
            }

            # Element-wise median of the averaged shadows over the MoMs samples.
            MoMs_shadows[str(qubit_pair)] = {
                projected_bit_string: np.median(
                    [average_shadows_per_projection[str(qubit_pair)][m][projected_bit_string] for m in range(num_MoMs)],
                    axis=0,
                )
                for projected_bit_string in all_projection_bit_strings
            }

            all_negativities_list = [
                MoMs_negativities[str(qubit_pair)][projected_bit_string]["value"]
                for projected_bit_string in all_projection_bit_strings
            ]
            all_negativities_uncertainty = [
                MoMs_negativities[str(qubit_pair)][projected_bit_string]["uncertainty"]
                for projected_bit_string in all_projection_bit_strings
            ]

            # Index (into all_projection_bit_strings) of the projection with maximal negativity.
            max_negativity_projection = np.argmax(all_negativities_list)

            max_negativity = {
                "value": all_negativities_list[max_negativity_projection],
                "uncertainty": all_negativities_uncertainty[max_negativity_projection],
            }

            max_negativities[str(qubit_pair)] = {}  # {str(qubit_pair): {"negativity": float, "projection": str}}
            max_negativities[str(qubit_pair)].update(
                {
                    "projection": all_projection_bit_strings[max_negativity_projection],
                }
            )
            max_negativities[str(qubit_pair)].update(max_negativity)

            fig_name, fig = plot_density_matrix(
                matrix=MoMs_shadows[str(qubit_pair)][all_projection_bit_strings[max_negativity_projection]],
                qubit_pair=qubit_pair,
                projection=all_projection_bit_strings[max_negativity_projection],
                negativity=max_negativity,
                backend_name=backend_name,
                timestamp=execution_timestamp,
                tomography="shadow_tomography",
                num_RM_samples=num_RMs,
                num_MoMs_samples=num_MoMs,
            )
            plots[fig_name] = fig

            observations.extend(
                [
                    BenchmarkObservation(
                        name="max_negativity",
                        value=max_negativity["value"],
                        uncertainty=max_negativity["uncertainty"],
                        identifier=BenchmarkObservationIdentifier(qubit_pair),
                    )
                ]
            )

    # Persist all intermediate results so post-hoc analysis does not require re-execution.
    dataset.attrs.update(
        {
            "median_of_means_shadows": MoMs_shadows,
            "median_of_means_negativities": MoMs_negativities,
            "all_negativities": all_negativities,
            "all_shadows": shadows_per_projection,
        }
    )

    return plots, observations, max_negativities, dataset
747
|
+
|
|
748
|
+
def state_tomography_analysis(
    dataset: xr.Dataset,
    all_qubit_pairs_per_group: Dict[int, List[Tuple[int, int]]],
    all_qubit_neighbors_per_group: Dict[int, List[List[int]]],
    all_unprojected_qubits: Dict[int, List[int]],
    backend_name: str,
    execution_timestamp: str,
) -> Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
    """
    Performs state tomography analysis on the given dataset.

    For each qubit pair, two-qubit Pauli expectations are estimated per
    neighbor-projection bitstring, the density matrix is reconstructed, and
    the projection with maximal negativity is bootstrapped for an uncertainty
    estimate.

    Args:
        dataset (xr.Dataset): The dataset containing the experimental data.
        all_qubit_pairs_per_group (Dict[int, List[Tuple[int, int]]]): Dictionary mapping group indices to lists of qubit pairs.
        all_qubit_neighbors_per_group (Dict[int, List[List[int]]]): Dictionary mapping group indices to lists of neighbor qubit groups.
        all_unprojected_qubits (Dict[int, List[int]]): Dictionary mapping group indices to lists of unprojected qubits.
        backend_name (str): The name of the backend used for the experiment.
        execution_timestamp (str): The timestamp of the experiment execution.

    Returns:
        Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, float]], xr.Dataset]:
            A tuple containing:
            - A dictionary of plots.
            - A list of benchmark observations.
            - A dictionary of maximum negativities.
            - The updated dataset.
    """
    plots: Dict[str, Any] = {}
    observations: List[BenchmarkObservation] = []
    max_negativities: Dict[str, Dict[str, str | float]] = {}

    execution_results = {}

    num_bootstraps = dataset.attrs["num_bootstraps"]

    tomography_state: Dict[int, Dict[str, Dict[str, np.ndarray]]] = {}
    # tomography_state: group_idx -> qubit_pair -> {projection:numpy array}
    bootstrapped_states: Dict[int, Dict[str, List[np.ndarray]]] = {}
    # bootstrapped_states: group_idx -> qubit_pair -> List of bootstrapped states for max_neg_projection
    tomography_negativities: Dict[int, Dict[str, Dict[str, float]]] = {}
    bootstrapped_negativities: Dict[int, Dict[str, List[float]]] = {}
    bootstrapped_avg_negativities: Dict[int, Dict[str, Dict[str, float]]] = {}
    num_tomo_samples = (
        3**2
    )  # In general 3**n samples suffice (assuming trace-preservation and unitality for the Pauli measurements)
    for group_idx, group in all_qubit_pairs_per_group.items():
        qcvv_logger.info(
            f"Retrieving tomography-reconstructed states with {num_bootstraps} for qubit-pair group {group_idx+1}/{len(all_qubit_pairs_per_group)}"
        )

        # Assume only pairs and nearest-neighbors were measured, and each pair in the group used num_RMs randomized measurements:
        execution_results[group_idx] = xrvariable_to_counts(
            dataset, str(all_unprojected_qubits[group_idx]), num_tomo_samples * len(group)
        )

        tomography_state[group_idx] = {}
        bootstrapped_states[group_idx] = {}
        tomography_negativities[group_idx] = {}
        bootstrapped_negativities[group_idx] = {}
        bootstrapped_avg_negativities[group_idx] = {}

        # One chunk of num_tomo_samples Pauli-basis measurement counts per pair.
        partitioned_counts = split_sequence_in_chunks(execution_results[group_idx], num_tomo_samples)

        for pair_idx, qubit_pair in enumerate(group):
            # Get the neighbor qubits of qubit_pair
            neighbor_qubits = all_qubit_neighbors_per_group[group_idx][pair_idx]
            neighbor_bit_strings_length = len(neighbor_qubits)
            # Generate all possible projection bitstrings for the neighbors, {'0','1'}^{\otimes{N}}
            all_projection_bit_strings = [
                "".join(x) for x in itertools.product(("0", "1"), repeat=neighbor_bit_strings_length)
            ]

            # The 9 identity-free two-qubit Pauli labels, in the order the
            # measurement circuits were generated (ZZ, ZX, ZY, XZ, ...).
            sqg_pauli_strings = ("Z", "X", "Y")
            all_nonId_pauli_labels = ["".join(x) for x in itertools.product(sqg_pauli_strings, repeat=2)]

            pauli_expectations: Dict[str, Dict[str, float]] = {
                projection: {} for projection in all_projection_bit_strings
            }
            # pauli_expectations: projected_bit_string -> pauli string -> float expectation
            for pauli_idx, counts in enumerate(partitioned_counts[pair_idx]):
                # NOTE(review): b_s[-2:] assumes the pair occupies the last two characters of
                # each outcome bitstring — confirm against the classical register ordering.
                # Projections never observed in these counts are skipped entirely.
                projected_counts = {
                    projection: {
                        b_s[-2:]: b_c for b_s, b_c in counts.items() if b_s[:neighbor_bit_strings_length] == projection
                    }
                    for projection in all_projection_bit_strings
                    if projection in [c[:neighbor_bit_strings_length] for c in counts.keys()]
                }

                pauli_expectations = update_pauli_expectations(
                    pauli_expectations,
                    projected_counts,
                    nonId_pauli_label=all_nonId_pauli_labels[pauli_idx],
                )

            # Remove projections with empty values for pauli_expectations
            # This will happen if certain projection bitstrings were just not measured
            pauli_expectations = {
                projection: expectations for projection, expectations in pauli_expectations.items() if expectations
            }

            tomography_state[group_idx][str(qubit_pair)] = {
                projection: get_tomography_matrix(pauli_expectations=pauli_expectations[projection])
                for projection in pauli_expectations.keys()
            }

            tomography_negativities[group_idx][str(qubit_pair)] = {
                projected_bit_string: get_negativity(
                    tomography_state[group_idx][str(qubit_pair)][projected_bit_string], 1, 1
                )
                for projected_bit_string in pauli_expectations.keys()
            }

            # Extract the max negativity and the corresponding projection - save in dictionary
            all_negativities_list = [
                tomography_negativities[group_idx][str(qubit_pair)][projected_bit_string]
                for projected_bit_string in pauli_expectations.keys()
            ]

            max_negativity_projection_idx = np.argmax(all_negativities_list)
            max_negativity_bitstring = list(pauli_expectations.keys())[max_negativity_projection_idx]

            # Bootstrapping - do only for max projection bitstring
            bootstrapped_pauli_expectations: List[Dict[str, Dict[str, float]]] = [
                {max_negativity_bitstring: {}} for _ in range(num_bootstraps)
            ]
            for pauli_idx, counts in enumerate(partitioned_counts[pair_idx]):
                projected_counts = {
                    b_s[-2:]: b_c
                    for b_s, b_c in counts.items()
                    if b_s[:neighbor_bit_strings_length] == max_negativity_bitstring
                }
                all_bootstrapped_counts = bootstrap_counts(
                    projected_counts, num_bootstraps, include_original_counts=True
                )
                for bootstrap in range(num_bootstraps):
                    bootstrapped_pauli_expectations[bootstrap] = update_pauli_expectations(
                        bootstrapped_pauli_expectations[bootstrap],
                        projected_counts={max_negativity_bitstring: all_bootstrapped_counts[bootstrap]},
                        nonId_pauli_label=all_nonId_pauli_labels[pauli_idx],
                    )

            bootstrapped_states[group_idx][str(qubit_pair)] = [
                get_tomography_matrix(
                    pauli_expectations=bootstrapped_pauli_expectations[bootstrap][max_negativity_bitstring]
                )
                for bootstrap in range(num_bootstraps)
            ]

            bootstrapped_negativities[group_idx][str(qubit_pair)] = [
                get_negativity(bootstrapped_states[group_idx][str(qubit_pair)][bootstrap], 1, 1)
                for bootstrap in range(num_bootstraps)
            ]

            # Mean/std over bootstrap resamples as the reported central value and uncertainty.
            bootstrapped_avg_negativities[group_idx][str(qubit_pair)] = {
                "value": float(np.mean(bootstrapped_negativities[group_idx][str(qubit_pair)])),
                "uncertainty": float(np.std(bootstrapped_negativities[group_idx][str(qubit_pair)])),
            }

            max_negativity = {
                "value": all_negativities_list[max_negativity_projection_idx],
                "bootstrapped_average": bootstrapped_avg_negativities[group_idx][str(qubit_pair)]["value"],
                "uncertainty": bootstrapped_avg_negativities[group_idx][str(qubit_pair)]["uncertainty"],
            }

            max_negativities[str(qubit_pair)] = {}  # {str(qubit_pair): {"negativity": float, "projection": str}}
            max_negativities[str(qubit_pair)].update(
                {
                    "projection": max_negativity_bitstring,
                }
            )
            max_negativities[str(qubit_pair)].update(max_negativity)

            fig_name, fig = plot_density_matrix(
                matrix=tomography_state[group_idx][str(qubit_pair)][max_negativity_bitstring],
                qubit_pair=qubit_pair,
                projection=max_negativity_bitstring,
                negativity=max_negativity,
                backend_name=backend_name,
                timestamp=execution_timestamp,
                tomography="state_tomography",
            )
            plots[fig_name] = fig

            observations.extend(
                [
                    BenchmarkObservation(
                        name="max_negativity",
                        value=max_negativity["value"],
                        uncertainty=max_negativity["uncertainty"],
                        identifier=BenchmarkObservationIdentifier(qubit_pair),
                    )
                ]
            )

    dataset.attrs.update(
        {
            "all_tomography_states": tomography_state,
            "all_negativities": tomography_negativities,
        }
    )

    return plots, observations, max_negativities, dataset
951
|
+
|
|
952
|
+
def negativity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
    """Analysis function for a Graph State benchmark experiment.

    Dispatches to shadow- or state-tomography analysis depending on the
    configured `tomography` attribute, then renders summary plots of the
    maximal negativities per qubit pair.
    """
    qcvv_logger.info("Fetching dataset")
    dataset = run.dataset.copy(deep=True)
    qcvv_logger.info("Dataset imported OK")

    # Pull every piece of experiment metadata the analysis needs up front.
    attrs = dataset.attrs
    backend_name = attrs["backend_name"]
    coupling_map = attrs["coupling_map"]
    qubit_names = attrs["qubit_names"]
    execution_timestamp = attrs["execution_timestamp"]
    tomography = attrs["tomography"]
    num_bootstraps = attrs["num_bootstraps"]
    num_RMs = attrs["n_random_unitaries"]
    num_MoMs = attrs["n_median_of_means"]
    num_shots = attrs["shots"]

    all_qubit_pairs_per_group = attrs["all_pair_groups"]
    all_qubit_neighbors_per_group = attrs["all_neighbor_groups"]
    all_unprojected_qubits = attrs["all_unprojected_qubits"]

    # Select the tomography routine; both share the same call signature, so the
    # dispatch collapses to picking the function object.
    analysis_fn = shadow_tomography_analysis if tomography == "shadow_tomography" else state_tomography_analysis
    plots, observations, max_negativities, dataset = analysis_fn(
        dataset,
        all_qubit_pairs_per_group,
        all_qubit_neighbors_per_group,
        all_unprojected_qubits,
        backend_name,
        execution_timestamp,
    )

    dataset.attrs.update({"max_negativities": max_negativities})

    # Bar-style summary of the maximal negativity per qubit pair.
    negativities_fig_name, negativities_fig = plot_max_negativities(
        negativities=max_negativities,
        backend_name=backend_name,
        qubit_names=qubit_names,
        timestamp=execution_timestamp,
        tomography=tomography,
        num_shots=num_shots,
        num_bootstraps=num_bootstraps,
        num_RM_samples=num_RMs,
        num_MoMs_samples=num_MoMs,
    )
    plots[negativities_fig_name] = negativities_fig

    # Same data rendered onto the device's coupling-map graph.
    graph_fig_name, graph_fig = plot_max_negativities_graph(
        negativities=max_negativities,
        backend_coupling_map=coupling_map,
        qubit_names=qubit_names,
        timestamp=execution_timestamp,
        tomography=tomography,
        num_shots=num_shots,
        num_bootstraps=num_bootstraps,
        num_RM_samples=num_RMs,
        num_MoMs_samples=num_MoMs,
    )
    plots[graph_fig_name] = graph_fig

    qcvv_logger.info("Analysis of Graph State Benchmark experiment concluded!")

    return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
|
1022
|
+
|
|
1023
|
+
class GraphStateBenchmark(Benchmark):
|
|
1024
|
+
"""The Graph States benchmark estimates the bipartite entangelement negativity of native graph states."""
|
|
1025
|
+
|
|
1026
|
+
analysis_function = staticmethod(negativity_analysis)
|
|
1027
|
+
name = "graph_states"
|
|
1028
|
+
|
|
1029
|
+
    def __init__(self, backend_arg: IQMBackendBase, configuration: "GraphStateConfiguration"):
        """Construct the GraphStateBenchmark class.

        Args:
            backend_arg (IQMBackendBase): the backend to execute the benchmark on
            configuration (GraphStateConfiguration): the configuration of the benchmark
        """
        super().__init__(backend_arg, configuration)

        # Preserve the string alias if one was passed; otherwise use the backend's name.
        self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name

        # Physical qubits to benchmark and the tomography mode ("shadow_tomography" or state tomography).
        self.qubits = configuration.qubits
        self.tomography = configuration.tomography

        # Sampling parameters: bootstrap resamples (state tomography), randomized
        # unitaries and median-of-means samples (shadow tomography).
        self.num_bootstraps = configuration.num_bootstraps
        self.n_random_unitaries = configuration.n_random_unitaries
        self.n_median_of_means = configuration.n_median_of_means

        # Initialize relevant variables for the benchmark
        self.graph_state_circuit = generate_graph_state(self.qubits, self.backend)
        self.coupling_map = set_coupling_map(self.qubits, self.backend, physical_layout="fixed")

        # Initialize the variable to contain the benchmark circuits of each layout
        self.circuits = Circuits()
        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")

        # Session timestamp is fixed at construction; execution timestamp is set in execute().
        self.session_timestamp = strftime("%Y%m%d-%H%M%S")
        self.execution_timestamp = ""
|
1059
|
+
def add_all_meta_to_dataset(self, dataset: xr.Dataset):
|
|
1060
|
+
"""Adds all configuration metadata and circuits to the dataset variable
|
|
1061
|
+
|
|
1062
|
+
Args:
|
|
1063
|
+
dataset (xr.Dataset): The xarray dataset
|
|
1064
|
+
"""
|
|
1065
|
+
dataset.attrs["session_timestamp"] = self.session_timestamp
|
|
1066
|
+
dataset.attrs["execution_timestamp"] = self.execution_timestamp
|
|
1067
|
+
dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
|
|
1068
|
+
dataset.attrs["backend_name"] = self.backend.name
|
|
1069
|
+
dataset.attrs["qubit_names"] = {qubit: self.backend.index_to_qubit_name(qubit) for qubit in self.qubits}
|
|
1070
|
+
dataset.attrs["coupling_map"] = self.coupling_map
|
|
1071
|
+
|
|
1072
|
+
for key, value in self.configuration:
|
|
1073
|
+
if key == "benchmark": # Avoid saving the class object
|
|
1074
|
+
dataset.attrs[key] = value.name
|
|
1075
|
+
else:
|
|
1076
|
+
dataset.attrs[key] = value
|
|
1077
|
+
# Defined outside configuration - if any
|
|
1078
|
+
|
|
1079
|
+
@timeit
|
|
1080
|
+
def add_all_circuits_to_dataset(self, dataset: xr.Dataset):
|
|
1081
|
+
"""Adds all generated circuits during execution to the dataset variable
|
|
1082
|
+
|
|
1083
|
+
Args:
|
|
1084
|
+
dataset (xr.Dataset): The xarray dataset
|
|
1085
|
+
|
|
1086
|
+
Returns:
|
|
1087
|
+
|
|
1088
|
+
"""
|
|
1089
|
+
qcvv_logger.info(f"Adding all circuits to the dataset")
|
|
1090
|
+
for key, circuit in zip(
|
|
1091
|
+
["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
|
|
1092
|
+
):
|
|
1093
|
+
dictionary = {}
|
|
1094
|
+
for outer_key, outer_value in circuit.items():
|
|
1095
|
+
dictionary[str(outer_key)] = {
|
|
1096
|
+
str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
|
|
1097
|
+
}
|
|
1098
|
+
dataset.attrs[key] = dictionary
|
|
1099
|
+
|
|
1100
|
+
    @timeit
    def generate_all_circuit_info_for_graph_state_benchmark(self) -> Dict[str, Any]:
        """
        Generates all circuits and associated information for the Graph State benchmark:
        - Generates native graph states
        - Identifies all pairs of qubits with disjoint neighbors
        - Generates all projected nodes to cover all pairs of qubits with disjoint neighbors

        Returns:
            Dict[str, Any]: A dictionary containing all circuit information for the Graph State benchmark.

        """
        # Map virtual-circuit bit indices to the physical qubits of interest.
        # NOTE(review): relies on the private Qubit._index attribute — confirm it is
        # stable across the Qiskit versions this package supports.
        layout_mapping = {
            a._index: b  # pylint: disable=W0212
            for a, b in self.graph_state_circuit.layout.initial_layout.get_virtual_bits().items()
            if b in self.qubits
        }

        # Get unique list of edges - Use layout_mapping to determine the connections between physical qubits
        graph_edges = [
            (layout_mapping[e[0]], layout_mapping[e[1]])
            for e in list(self.coupling_map.graph.to_undirected(multigraph=False).edge_list())
        ]

        # Find pairs of nodes with disjoint neighbors
        # {idx: [(q1,q2), (q3,q4), ...]}
        pair_groups = find_edges_with_disjoint_neighbors(graph_edges)
        # {idx: [(n11,n12,n13,...), (n21,n22,n23,...), ...]}
        neighbor_groups = {
            idx: [get_neighbors_of_edges([y], graph_edges) for y in x] for idx, x in enumerate(pair_groups)
        }

        # Get all projected nodes to cover all pairs of qubits with disjoint neighbours
        # {idx: [q1,q2,q3,q4, ...]} - flatten each group's pairs into one qubit list
        unmeasured_qubit_indices = {idx: [a for b in x for a in b] for idx, x in enumerate(pair_groups)}
        # {idx: [n11,n12,n13,...,n21,n22,n23, ...]}
        projected_nodes = {idx: get_neighbors_of_edges(list(x), graph_edges) for idx, x in enumerate(pair_groups)}

        # Generate copies of circuits to add projections and randomized measurements
        grouped_graph_circuits = {idx: self.graph_state_circuit.copy() for idx in projected_nodes.keys()}

        return {
            "grouped_graph_circuits": grouped_graph_circuits,
            "unmeasured_qubit_indices": unmeasured_qubit_indices,
            "projected_nodes": projected_nodes,
            "pair_groups": dict(enumerate(pair_groups)),
            "neighbor_groups": neighbor_groups,
        }
|
|
1149
|
+
def execute(self, backend) -> xr.Dataset:  # pylint: disable=too-many-statements
    """Execute the Graph State benchmark on the given backend.

    Generates the tomography circuits (shadow or state tomography, per
    ``self.tomography``) for each group of qubit pairs with disjoint
    neighbors, submits them for execution, retrieves the counts, and
    collects everything into an ``xarray`` dataset.

    Args:
        backend: The backend to execute the benchmark circuits on.
            NOTE(review): type not annotated in SOURCE — presumably an
            IQM/Qiskit backend accepted by ``submit_execute``; confirm.

    Returns:
        xr.Dataset: Dataset with counts stored per qubit-pair identifier
        and metadata (timings, unitaries, job metadata) in ``attrs``.
    """
    self.execution_timestamp = strftime("%Y%m%d-%H%M%S")

    dataset = xr.Dataset()
    self.add_all_meta_to_dataset(dataset)

    # Routine to generate all circuit info (grouped circuits, qubit pairs,
    # neighbor groups) for the benchmark; timing is recorded in the dataset.
    qcvv_logger.info(f"Identifying qubit pairs and neighbor groups for the Graph State benchmark")
    graph_benchmark_circuit_info, time_circuit_generation = (
        self.generate_all_circuit_info_for_graph_state_benchmark()
    )
    dataset.attrs.update({"time_circuit_generation": time_circuit_generation})

    # pylint: disable=invalid-sequence-index
    # Unpack the per-group circuit info: all dicts are keyed by group index.
    grouped_graph_circuits: Dict[int, QuantumCircuit] = graph_benchmark_circuit_info["grouped_graph_circuits"]
    unprojected_qubits = graph_benchmark_circuit_info["unmeasured_qubit_indices"]
    neighbor_qubits = graph_benchmark_circuit_info["projected_nodes"]
    pair_groups = graph_benchmark_circuit_info["pair_groups"]
    neighbor_groups = graph_benchmark_circuit_info["neighbor_groups"]
    # pylint: enable=invalid-sequence-index

    dataset.attrs.update(
        {
            "all_unprojected_qubits": unprojected_qubits,
            "all_projected_qubits": neighbor_qubits,
            "all_pair_groups": pair_groups,
            "all_neighbor_groups": neighbor_groups,
        }
    )

    # Per-group circuit collections and timing bookkeeping, keyed by group index.
    circuits_untranspiled: Dict[int, List[QuantumCircuit]] = {}
    circuits_transpiled: Dict[int, List[QuantumCircuit]] = {}

    time_circuits = {}
    time_transpilation = {}
    all_graph_submit_results = []

    # The 1Q Clifford dictionary is only needed for shadow tomography.
    if self.tomography == "shadow_tomography":
        clifford_1q_dict = import_native_gate_cliffords("1q")

    qcvv_logger.info(f"Performing {self.tomography.replace('_',' ')} of all qubit pairs")

    all_unitaries: Dict[int, Dict[int, Dict[str, List[str]]]] = {}
    # all_unitaries: group_idx -> MoMs -> projection -> List[Clifford labels]
    # Will be empty if state_tomography -> assign Clifford labels in analysis
    for idx, circuit in grouped_graph_circuits.items():
        # It is not clear now that grouping is needed,
        # since it seems like pairs must be measured one at a time
        # (marginalizing any other qubits gives maximally mixed states)
        # however, the same structure is used in case this can still somehow be parallelized
        qcvv_logger.info(f"Now on group {idx + 1}/{len(grouped_graph_circuits)}")
        if self.tomography == "shadow_tomography":
            # Outer loop for each mean to be considered for Median of Means (MoMs) estimators
            all_unitaries[idx] = {m: {} for m in range(self.n_median_of_means)}
            circuits_untranspiled[idx] = []
            circuits_transpiled[idx] = []
            time_circuits[idx] = 0
            time_transpilation[idx] = 0
            for qubit_pair, neighbors in zip(pair_groups[idx], neighbor_groups[idx]):
                # Accumulators for randomized-measurement (RM) circuits across MoMs samples.
                RM_circuits_untranspiled_MoMs = []
                RM_circuits_transpiled_MoMs = []
                time_circuits_MoMs = 0
                for MoMs in range(self.n_median_of_means):
                    # Go through each pair and only project neighbors
                    # all_unitaries[idx][MoMs] = {}
                    qcvv_logger.info(
                        f"Now on qubit pair {qubit_pair} and neighbors {neighbors} for Median of Means sample {MoMs + 1}/{self.n_median_of_means}"
                    )
                    # Build the RM (local shadow) circuits for this pair, projecting
                    # its neighbor qubits; also returns the sampled Clifford labels.
                    (unitaries_single_pair, rm_circuits_untranspiled_single_pair), time_rm_circuits_single_pair = (
                        local_shadow_tomography(
                            qc=circuit,
                            Nu=self.n_random_unitaries,
                            active_qubits=qubit_pair,
                            measure_other=neighbors,
                            measure_other_name="neighbors",
                            clifford_or_haar="clifford",
                            cliffords_1q=clifford_1q_dict,
                        )
                    )

                    all_unitaries[idx][MoMs].update(unitaries_single_pair)
                    RM_circuits_untranspiled_MoMs.extend(rm_circuits_untranspiled_single_pair)
                    # When using a Clifford dictionary, both the graph state and the RMs are generated natively
                    RM_circuits_transpiled_MoMs.extend(rm_circuits_untranspiled_single_pair)
                    time_circuits_MoMs += time_rm_circuits_single_pair

                    self.transpiled_circuits.circuit_groups.append(
                        CircuitGroup(name=str(qubit_pair), circuits=rm_circuits_untranspiled_single_pair)
                    )

                time_circuits[idx] += time_circuits_MoMs
                circuits_untranspiled[idx].extend(RM_circuits_untranspiled_MoMs)
                circuits_transpiled[idx].extend(RM_circuits_transpiled_MoMs)

            dataset.attrs.update({"all_unitaries": all_unitaries})
        else:  # if self.tomography == "state_tomography" (default)
            circuits_untranspiled[idx] = []
            circuits_transpiled[idx] = []
            time_circuits[idx] = 0
            time_transpilation[idx] = 0
            for qubit_pair, neighbors in zip(pair_groups[idx], neighbor_groups[idx]):
                qcvv_logger.info(f"Now on qubit pair {qubit_pair} and neighbors {neighbors}")
                # Full state-tomography circuits for this pair, with neighbors
                # projected; returned as a dict of basis-setting -> circuit.
                state_tomography_circuits, time_state_tomo_circuits_single_pair = (
                    generate_state_tomography_circuits(
                        qc=circuit,
                        active_qubits=qubit_pair,
                        measure_other=neighbors,
                        measure_other_name="neighbors",
                        native=True,
                    )
                )

                self.transpiled_circuits.circuit_groups.append(
                    CircuitGroup(
                        name=str(qubit_pair), circuits=list(cast(dict, state_tomography_circuits).values())
                    )
                )
                time_circuits[idx] += time_state_tomo_circuits_single_pair
                circuits_untranspiled[idx].extend(cast(dict, state_tomography_circuits).values())
                # When using a native gates in tomo step, both the graph state and the RMs are generated natively
                circuits_transpiled[idx].extend(cast(dict, state_tomography_circuits).values())

        # Submit for execution in backend - submit all per pair group, irrespective of tomography procedure.
        # A whole group is considered as a single batch.
        # Jobs will only be split in separate submissions if there are batch size limitations (retrieval will occur per batch).
        # It shouldn't be a problem [anymore] that different qubits are being measured in a single batch.
        # Post-processing will take care of separating MoMs samples and identifying all unitary (Clifford) labels.
        sorted_transpiled_qc_list = {tuple(unprojected_qubits[idx]): circuits_transpiled[idx]}
        graph_jobs, time_submit = submit_execute(
            sorted_transpiled_qc_list, backend, self.shots, self.calset_id, self.max_gates_per_batch
        )

        all_graph_submit_results.append(
            {
                "unprojected_qubits": unprojected_qubits[idx],
                "neighbor_qubits": neighbor_qubits[idx],
                "jobs": graph_jobs,
                "time_submit": time_submit,
            }
        )

    # Retrieve all counts and add to dataset
    for job_idx, job_dict in enumerate(all_graph_submit_results):
        # NOTE(review): this rebinds the name `unprojected_qubits` (previously
        # the per-group dict) to this job's qubit list; the dict is not used
        # again after this point, so behavior is unaffected.
        unprojected_qubits = job_dict["unprojected_qubits"]
        # Retrieve counts
        execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier=str(unprojected_qubits))

        # Retrieve all job meta data
        all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])

        # Export all to dataset (timings and job metadata keyed by group index)
        dataset.attrs.update(
            {
                job_idx: {
                    "time_circuits": time_circuits[job_idx],
                    "time_transpilation": time_transpilation[job_idx],
                    "time_submit": job_dict["time_submit"],
                    "time_retrieve": time_retrieve,
                    "all_job_metadata": all_job_metadata,
                }
            }
        )

        qcvv_logger.info(f"Adding counts of qubit pairs {unprojected_qubits} to the dataset")
        dataset, _ = add_counts_to_dataset(execution_results, str(unprojected_qubits), dataset)

    self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])

    # if self.rem: TODO: add REM functionality

    qcvv_logger.info(f"Graph State benchmark experiment execution concluded !")

    return dataset
|
|
1325
|
+
|
|
1326
|
+
|
|
1327
|
+
class GraphStateConfiguration(BenchmarkConfigurationBase):
    """Graph States Benchmark configuration.

    Attributes:
        benchmark (Type[Benchmark]): GraphStateBenchmark
        qubits (Sequence[int]): The physical qubit layout in which to benchmark graph state generation.
        tomography (Literal["state_tomography", "shadow_tomography"]): Whether to use state or shadow tomography.
            * Default is "state_tomography".
        num_bootstraps (int): The amount of bootstrap samples to use with state tomography.
            * Default is 50.
        n_random_unitaries (int): The number of Haar random single-qubit unitaries to use for (local) shadow tomography.
            * Default is 100.
        n_median_of_means (int): The number of mean samples over n_random_unitaries to generate a median of means estimator for shadow tomography.
            * NB: The total amount of execution calls will be a multiplicative factor of n_random_unitaries x n_median_of_means.
            * Default is 1 (no median of means).

    """

    benchmark: Type[Benchmark] = GraphStateBenchmark
    qubits: Sequence[int]
    tomography: Literal["state_tomography", "shadow_tomography"] = "state_tomography"
    num_bootstraps: int = 50
    n_random_unitaries: int = 100
    n_median_of_means: int = 1
|