iqm-benchmarks 2.27__py3-none-any.whl → 2.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of iqm-benchmarks might be problematic. Click here for more details.

@@ -0,0 +1,1348 @@
1
+ # Copyright 2025 IQM Benchmarks developers
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # pylint: disable=too-many-lines
16
+
17
+ """
18
+ Graph states benchmark
19
+ """
20
+ from collections import defaultdict
21
+ import itertools
22
+ from time import strftime
23
+ from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple, Type, cast
24
+
25
+ from matplotlib.figure import Figure
26
+ import matplotlib.pyplot as plt
27
+ import networkx as nx
28
+ import numpy as np
29
+ from qiskit import QuantumCircuit, transpile
30
+ from qiskit.transpiler import CouplingMap
31
+ import xarray as xr
32
+
33
+ from iqm.benchmarks import Benchmark, BenchmarkCircuit, BenchmarkRunResult, CircuitGroup, Circuits
34
+ from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
35
+ from iqm.benchmarks.benchmark_definition import (
36
+ BenchmarkAnalysisResult,
37
+ BenchmarkObservation,
38
+ BenchmarkObservationIdentifier,
39
+ add_counts_to_dataset,
40
+ )
41
+ from iqm.benchmarks.logging_config import qcvv_logger
42
+ from iqm.benchmarks.randomized_benchmarking.randomized_benchmarking_common import import_native_gate_cliffords
43
+ from iqm.benchmarks.utils import ( # marginal_distribution, perform_backend_transpilation,
44
+ bootstrap_counts,
45
+ generate_state_tomography_circuits,
46
+ get_neighbors_of_edges,
47
+ get_Pauli_expectation,
48
+ get_tomography_matrix,
49
+ median_with_uncertainty,
50
+ remove_directed_duplicates_to_list,
51
+ retrieve_all_counts,
52
+ retrieve_all_job_metadata,
53
+ set_coupling_map,
54
+ split_sequence_in_chunks,
55
+ submit_execute,
56
+ timeit,
57
+ xrvariable_to_counts,
58
+ )
59
+ from iqm.benchmarks.utils_plots import GraphPositions, rx_to_nx_graph
60
+ from iqm.benchmarks.utils_shadows import get_local_shadow, get_negativity, local_shadow_tomography
61
+ from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
62
+
63
+
64
def find_edges_with_disjoint_neighbors(
    graph: Sequence[Sequence[int]],
) -> List[List[Sequence[int]]]:
    """Finds sets of edges with non-overlapping neighboring nodes.

    The edges are greedily partitioned into "iterations": within one iteration no two
    chosen edges share a node, and no node of a chosen edge is adjacent (in the full
    graph) to a node of another chosen edge.

    Args:
        graph (Sequence[Sequence[int]]): The input graph specified as a sequence of edges (Sequence[int]).
    Returns:
        List[List[Tuple[int]]]: A list of lists of edges (Tuple[int]) from the original graph with non-overlapping neighboring nodes.
    """
    # Normalize edges to tuples so they are hashable regardless of the input sequence
    # type: list edges (allowed by the Sequence[Sequence[int]] annotation) would
    # otherwise raise TypeError in the set operations below.
    edges = [tuple(edge) for edge in graph]

    # Build adjacency list representation of the graph
    adjacency = defaultdict(set)
    for u, v in edges:
        adjacency[u].add(v)
        adjacency[v].add(u)

    def get_edge_neighbors(edge):
        """Return nodes adjacent to either endpoint of ``edge``, excluding the endpoints."""
        u, v = edge
        return (adjacency[u] | adjacency[v]) - {u, v}

    remaining_edges = set(edges)  # Keep track of edges not yet assigned to an iteration
    iterations = []  # Store the edges chosen in each iteration

    while remaining_edges:
        current_iteration = set()  # Edges chosen in this iteration
        used_nodes = set()  # Nodes already used in this iteration

        for edge in list(remaining_edges):
            u, v = edge
            # Skip edges touching a node already used in this iteration
            if u in used_nodes or v in used_nodes:
                continue

            # Skip edges whose neighborhood overlaps an already-used node
            edge_neighbors = get_edge_neighbors(edge)
            if any(neighbor in used_nodes for neighbor in edge_neighbors):
                continue

            # Add the edge to the current iteration
            current_iteration.add(edge)
            used_nodes.update([u, v])

        # Add the chosen edges to the result
        iterations.append(list(current_iteration))
        remaining_edges -= current_iteration  # Remove chosen edges from the remaining edges

    return iterations
114
+
115
+
116
def generate_minimal_edge_layers(cp_map: CouplingMap) -> Dict[int, List[List[int]]]:
    """Partitions the edges of a coupling map into layers with no overlapping nodes.

    Each dictionary value is a subset of the (undirected) coupling map in which no two
    pairs share a qubit, i.e. a layer of qubit pairs on which 2Q gates can run in parallel.

    Args:
        cp_map (CouplingMap): A list of lists of pairs of integers, representing a coupling map.
    Returns:
        Dict[int, List[List[int]]]: A dictionary with values being subsets of the coupling map with no overlapping nodes.
    """
    # Treat each undirected edge as a node of a conflict graph; two edges conflict
    # (are connected) when they share a qubit.
    edges = remove_directed_duplicates_to_list(cp_map)
    num_edges = len(edges)

    conflicts: Dict[int, Set] = {idx: set() for idx in range(num_edges)}
    for i in range(num_edges):
        for j in range(i + 1, num_edges):
            shared = set(edges[i]) & set(edges[j])  # qubits common to both edges
            if shared:
                conflicts[i].add(j)
                conflicts[j].add(i)

    # Greedy graph coloring: each color becomes one parallel layer.
    coloring: Dict[int, int] = {}
    for node in range(num_edges):
        # Smallest color not already taken by a conflicting neighbor
        taken = {coloring[neighbor] for neighbor in conflicts[node] if neighbor in coloring}
        candidate = 0
        while candidate in taken:
            candidate += 1
        coloring[node] = candidate

    # Collect edges by color, keeping the number of layers minimal (greedy bound).
    layers: Dict[int, List[List[int]]] = {}
    for edge_idx, layer_idx in coloring.items():
        layers.setdefault(layer_idx, []).append(edges[edge_idx])

    return layers
158
+
159
+
160
def generate_graph_state(qubits: Sequence[int], backend: IQMBackendBase | str) -> QuantumCircuit:
    """Builds a minimal-depth circuit preparing a native graph state on the given backend.

    Applies a Hadamard-equivalent r(pi/2, pi/2) rotation on every qubit, then a CZ on
    every edge of the undirected coupling map, scheduled in node-disjoint layers, and
    finally transpiles the result onto the requested physical qubits.

    Args:
        qubits (Sequence[int]): A list of integers representing the qubits.
        backend (IQMBackendBase): The backend to target the graph state generating circuit.
    Returns:
        QuantumCircuit: The circuit generating a graph state in the target backend.
    """
    total_qubits = len(qubits)
    circuit = QuantumCircuit(total_qubits)
    # Node-disjoint CZ layers derived from the backend's coupling map restricted to `qubits`
    edge_layers = generate_minimal_edge_layers(set_coupling_map(qubits, backend, physical_layout="fixed"))
    # Hadamards on every qubit, expressed as native r(pi/2, pi/2) rotations
    for qubit in range(total_qubits):
        circuit.r(np.pi / 2, np.pi / 2, qubit)
    # One CZ per coupling-map edge, emitted layer by layer
    for edge_layer in edge_layers.values():
        for pair in edge_layer:
            circuit.cz(pair[0], pair[1])
    # Map onto the physical qubits and optimize
    return transpile(circuit, backend=backend, initial_layout=qubits, optimization_level=3)
183
+
184
+
185
def plot_density_matrix(
    matrix: np.ndarray,
    qubit_pair: Sequence[int],
    projection: str,
    negativity: Dict[str, float],
    backend_name: str,
    timestamp: str,
    tomography: Literal["state_tomography", "shadow_tomography"],
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots a density matrix for corresponding qubit pairs, neighbor qubit projections, and negativities.

    Args:
        matrix (np.ndarray): The matrix to plot.
        qubit_pair (Sequence[int]): The corresponding qubit pair.
        projection (str): The projection corresponding to the matrix to plot.
        negativity (Dict[str, float]): A dictionary with keys "value" and "uncertainty" and values being respective negativities.
        backend_name (str): The name of the backend for the corresponding experiment.
        timestamp (str): The timestamp for the corresponding experiment.
        tomography (Literal["state_tomography", "shadow_tomography"]): The type of tomography used to gather the data of the matrix to plot.
        num_RM_samples (Optional[int] = None): The number of randomized measurement samples if tomography is shadow_tomography.
            * Default is None if tomography is state_tomography.
        num_MoMs_samples (Optional[int] = None): The number of Median of Means used per randomized measurement if tomography is shadow_tomography.
            * Default is None if tomography is state_tomography.
    Returns:
        Tuple[str, Figure]: The figure label and the density matrix plot figure.
    """

    # Real and imaginary parts drawn side by side, sharing axes.
    fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(6, 6))
    cmap = "winter_r"
    fig_name = str(qubit_pair)

    # NOTE(review): both panels use a color scale symmetric about zero and normalized
    # to max(matrix.real) — the imaginary panel deliberately(?) reuses the real part's
    # scale so the two subplots share one color range; confirm this is intended.
    ax[0].matshow(matrix.real, interpolation="nearest", vmin=-np.max(matrix.real), vmax=np.max(matrix.real), cmap=cmap)
    ax[0].set_title(r"$\mathrm{Re}(\hat{\rho})$")
    # Annotate every cell of the real part with its numeric value.
    for (i, j), z in np.ndenumerate(matrix.real):
        ax[0].text(
            j,
            i,
            f"{z:0.2f}",
            ha="center",
            va="center",
            bbox={"boxstyle": "round", "facecolor": "white", "edgecolor": "0.3"},
        )

    im1 = ax[1].matshow(
        matrix.imag, interpolation="nearest", vmin=-np.max(matrix.real), vmax=np.max(matrix.real), cmap=cmap
    )
    ax[1].set_title(r"$\mathrm{Im}(\hat{\rho})$")
    # Annotate every cell of the imaginary part with its numeric value.
    for (i, j), z in np.ndenumerate(matrix.imag):
        ax[1].text(
            j,
            i,
            f"{z:0.2f}",
            ha="center",
            va="center",
            bbox={"boxstyle": "round", "facecolor": "white", "edgecolor": "0.3"},
        )

    # Title reflects how the matrix was reconstructed (shadow vs. state tomography).
    if tomography == "shadow_tomography":
        fig.suptitle(
            f"Average shadow for qubits {qubit_pair} ({num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples)\n"
            f"Projection: {projection}\nNegativity: {negativity['value']:.4f} +/- {negativity['uncertainty']:.4f}\n"
            f"{backend_name} --- {timestamp}"
        )
    else:
        fig.suptitle(
            f"Tomographically reconstructed density matrix for qubits {qubit_pair}\n"
            f"Projection: {projection}\nNegativity: {negativity['value']:.4f} +/- {negativity['uncertainty']:.4f}\n"
            f"{backend_name} --- {timestamp}"
        )
    # Single colorbar, driven by the imaginary-part image (same scale as the real part).
    fig.colorbar(im1, shrink=0.5)
    fig.tight_layout(rect=(0, 0.03, 1, 1.25))

    # Close so the figure is not displayed eagerly; it is returned to the caller.
    plt.close()

    return fig_name, fig
262
+
263
+
264
def plot_max_negativities(
    negativities: Dict[str, Dict[str, str | float]],
    backend_name: str,
    qubit_names: Dict[int, str],
    timestamp: str,
    tomography: Literal["shadow_tomography", "state_tomography"],
    num_shots: int,
    num_bootstraps: Optional[int] = None,
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots the maximum negativity for each corresponding pair of qubits.

    Args:
        negativities (Dict[str, Dict[str, str | float]]): A dictionary (str qubit keys) of dictionaries (keys "value"/"uncertainty") of negativities (float) to plot.
        backend_name (str): The name of the backend for the corresponding experiment.
        qubit_names (Dict[int, str]): A dictionary of qubit names corresponding to qubit indices.
        timestamp (str): The timestamp of the corresponding experiment.
        tomography (Literal["shadow_tomography", "state_tomography"]): The type of tomography that was used.
        num_shots (int): The number of shots used in the corresponding experiment.
        num_bootstraps (Optional[int]): The number of bootstraps used if tomography corresponds to state tomography.
            * Defaults to None if the tomography type is "shadow_tomography".
        num_RM_samples (Optional[int]): The number of randomized measurement samples used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "state_tomography".
        num_MoMs_samples (Optional[int]): The number of Median of Means samples per randomized measurement used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "shadow_tomography".

    Returns:
        Tuple[str, Figure]: The figure label and the max negativities plot figure.
    """
    fig_name = f"max_negativities_{backend_name}_{timestamp}".replace(" ", "_")
    # Sort the negativities by value
    sorted_negativities = dict(sorted(negativities.items(), key=lambda item: item[1]["value"]))

    # Keys look like "(a, b)"; turn them into "a-b" strings for tick labels.
    x = [x.replace("(", "").replace(")", "").replace(", ", "-") for x in list(sorted_negativities.keys())]
    # Translate qubit indices into QPU qubit names, dropping the 2-char name prefix
    # (e.g. "QB" — presumably; verify against qubit_names values).
    x_updated = [
        f"{cast(str, qubit_names[int(a)])[2:]}-{cast(str, qubit_names[int(b)])[2:]}"
        for edge in x
        for a, b in [edge.split("-")]
    ]  ## reindexes the edges label as in the QPU graph.

    y = [a["value"] for a in sorted_negativities.values()]
    yerr = [a["uncertainty"] for a in sorted_negativities.values()]

    cmap = plt.get_cmap("winter")

    fig = plt.figure()
    ax = plt.axes()

    # Error-bar legend: SEM over RM samples for shadow tomography, otherwise
    # 1-sigma over bootstrap resamples.
    if tomography == "shadow_tomography":
        errorbar_labels = rf"$1 \mathrm{{SEM}}$ (N={cast(int, num_RM_samples)*cast(int,num_MoMs_samples)} RMs)"
    else:
        errorbar_labels = rf"$1 \sigma$ ({cast(int, num_bootstraps)} bootstraps)"

    plt.errorbar(
        x_updated,
        y,
        yerr=yerr,
        capsize=2,
        color=cmap(0.15),
        fmt="o",
        alpha=1,
        mec="black",
        markersize=3,
        label=errorbar_labels,
    )
    # Reference line at the maximum possible negativity (0.5).
    plt.axhline(0.5, color=cmap(1.0), linestyle="dashed")

    ax.set_xlabel("Qubit pair")
    ax.set_ylabel("Negativity")

    # Major y-ticks every 0.1, minor ticks every 0.05
    major_ticks = np.arange(0, 0.5, 0.1)
    minor_ticks = np.arange(-0.05, 0.55, 0.05)
    ax.set_yticks(major_ticks)
    ax.set_yticks(minor_ticks, minor=True)
    ax.grid(which="both")

    # Pad the y-range only when data (with error bars) spills outside [0, 0.5];
    # note the padding uses the first/last uncertainty of the value-sorted data.
    lower_y = np.min(y) - 1.75 * float(yerr[0]) - 0.02 if np.min(y) - float(yerr[0]) < 0 else -0.01
    upper_y = np.max(y) + 1.75 * float(yerr[-1]) + 0.02 if np.max(y) + float(yerr[-1]) > 0.5 else 0.51
    ax.set_ylim(
        (
            lower_y,
            upper_y,
        )
    )

    plt.xticks(rotation=90)
    if tomography == "shadow_tomography":
        plt.title(
            f"Max entanglement negativities for qubit pairs in {backend_name}\n{num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples\n{timestamp}"
        )
    else:
        plt.title(
            f"Max entanglement negativities for qubit pairs in {backend_name}\nShots per tomography sample: {num_shots}; Bootstraps: {num_bootstraps}\n{timestamp}"
        )
    plt.legend(fontsize=8)

    ax.margins(tight=True)

    # For few pairs, simply stretch the aspect ratio; for many pairs, resize the
    # whole figure so tick labels do not overlap.
    if len(x) <= 40:
        ax.set_aspect((2 / 3) * len(x))
        ax.autoscale(enable=True, axis="x")
    else:
        ####################################################################################
        # Solution to fix tick spacings taken from:
        # https://stackoverflow.com/questions/44863375/how-to-change-spacing-between-ticks
        plt.gca().margins(x=0.01)
        plt.gcf().canvas.draw()
        tl = plt.gca().get_xticklabels()
        maxsize = max(t.get_window_extent().width for t in tl)
        m = 0.2  # inch margin
        s = maxsize / plt.gcf().dpi * len(x) + 2 * m
        margin = m / plt.gcf().get_size_inches()[0]
        plt.gcf().subplots_adjust(left=margin, right=1.0 - margin)
        plt.gcf().set_size_inches(s, plt.gcf().get_size_inches()[1])
        ####################################################################################

    # Close so the figure is not displayed eagerly; it is returned to the caller.
    plt.close()

    return fig_name, fig
385
+
386
+
387
def plot_max_negativities_graph(
    negativities: Dict[str, Dict[str, str | float]],
    backend_coupling_map: CouplingMap,
    qubit_names: Dict[int, str],
    timestamp: str,
    tomography: Literal["shadow_tomography", "state_tomography"],
    station: Optional[str] = None,
    num_shots: Optional[int] = None,
    num_bootstraps: Optional[int] = None,
    num_RM_samples: Optional[int] = None,
    num_MoMs_samples: Optional[int] = None,
) -> Tuple[str, Figure]:
    """Plots the maximum negativity for each corresponding pair of qubits in a graph layout of the given backend.

    Args:
        negativities (Dict[str, Dict[str, str | float]]): A dictionary (str qubit keys) of dictionaries (keys "value"/"uncertainty") of negativities (float) to plot.
        backend_coupling_map (CouplingMap): The CouplingMap instance.
        qubit_names (Dict[int, str]): A dictionary of qubit names corresponding to qubit indices.
        timestamp (str): The timestamp of the corresponding experiment.
        tomography (Literal["shadow_tomography", "state_tomography"]): The type of tomography that was used.
        station (str): The name of the station to use for the graph layout.
        num_shots (Optional[int]): The number of shots used in the corresponding experiment.
            * Defaults to None: won't be displayed in title.
        num_bootstraps (Optional[int]): The number of bootstraps used if tomography corresponds to state tomography.
            * Defaults to None if the tomography type is "shadow_tomography".
        num_RM_samples (Optional[int]): The number of randomized measurement samples used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "state_tomography".
        num_MoMs_samples (Optional[int]): The number of Median of Means samples per randomized measurement used if tomography corresponds to shadow tomography.
            * Defaults to None if the tomography type is "shadow_tomography".

    Returns:
        Tuple[str, Figure]: The figure label and the max negativities plot figure.
    """
    num_qubits = len(qubit_names.keys())
    fig_name = (
        f"max_negativities_graph_{station}_{timestamp}"
        if station is not None
        else f"max_negativities_graph_{timestamp}"
    )
    # Sort the negativities by value
    sorted_negativities = dict(sorted(negativities.items(), key=lambda item: item[1]["value"]))

    # Parse "(a, b)"-style keys back into integer qubit-index tuples.
    qubit_pairs = [
        tuple(int(num) for num in x.replace("(", "").replace(")", "").replace("...", "").split(", "))
        for x in sorted_negativities.keys()
    ]
    negativity_values = [a["value"] for a in sorted_negativities.values()]

    negativity_edges = dict(zip(qubit_pairs, negativity_values))

    # FIX: plt.cm.get_cmap was deprecated in matplotlib 3.7 and removed in 3.9;
    # plt.get_cmap is the supported accessor and matches plot_max_negativities above.
    cmap = plt.get_cmap("winter")

    fig = plt.figure()
    ax = plt.axes()

    # Pick node positions: predefined station layout if known, otherwise compute
    # positions from the undirected coupling graph.
    if station is not None:
        if station.lower() in GraphPositions.predefined_stations:
            qubit_positions = GraphPositions.predefined_stations[station.lower()]
        else:
            graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
            qubit_positions = GraphPositions.create_positions(graph_backend)
    else:
        graph_backend = backend_coupling_map.graph.to_undirected(multigraph=False)
        # Heuristic: 20 qubits -> garnet, 7 qubits -> deneb layouts.
        if num_qubits in (20, 7):
            station = "garnet" if num_qubits == 20 else "deneb"
            qubit_positions = GraphPositions.predefined_stations[station]
        else:
            qubit_positions = GraphPositions.create_positions(graph_backend)

    # Normalize negativity values to the range [0, 1] for color mapping
    norm = plt.Normalize(vmin=cast(float, min(negativity_values)), vmax=cast(float, max(negativity_values)))
    edge_colors = [cmap(norm(negativity_edges[edge])) for edge in qubit_pairs]

    nx.draw_networkx(
        rx_to_nx_graph(backend_coupling_map),
        pos=qubit_positions,
        nodelist=list(range(num_qubits)),
        labels={x: qubit_names[x] for x in range(num_qubits)},
        font_size=6.5,
        edgelist=qubit_pairs,
        width=4.0,
        edge_color=edge_colors,
        node_color="k",
        font_color="w",
        ax=ax,
    )

    # Add colorbar
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax, shrink=0.5)

    shots_string = "" if num_shots is None else f"Shots per tomography sample: {num_shots}"
    station_string = "IQM Backend" if station is None else station.capitalize()
    if tomography == "shadow_tomography":
        plt.title(
            f"Max entanglement negativities for qubit pairs in {station_string}\n"
            f"{num_RM_samples} local RM samples x {num_MoMs_samples} Median of Means samples\n"
            f"{shots_string}; {timestamp}"
        )
    else:
        plt.title(
            f"Max entanglement negativities for qubit pairs in {station_string}\n"
            f"{shots_string}; Bootstraps: {num_bootstraps}"
            f"\n{timestamp}"
        )
    # Close so the figure is not displayed eagerly; it is returned to the caller.
    plt.close()

    return fig_name, fig
496
+
497
+
498
def update_pauli_expectations(
    pauli_expectations: Dict[str, Dict[str, float]],
    projected_counts: Dict[str, Dict[str, int]],
    nonId_pauli_label: str,
) -> Dict[str, Dict[str, float]]:
    """Updates the Pauli-expectation dictionary in place for one measurement basis.

    For each neighbor-projection bitstring, the expectation of ``nonId_pauli_label``
    is computed from its projected counts. Whenever a factor of the label is ``Z``,
    the expectations of the identity-substituted labels can be inferred from the same
    counts and are updated as well (e.g. "ZZ" also yields "ZI", "IZ" and "II").

    Args:
        pauli_expectations (Dict[str, Dict[str, float]]): The Pauli expectations dictionary of dictionaries to update.
            * Outermost keys are projected bitstrings; innermost are pauli strings and values are expectation values.
        projected_counts (Dict[str, Dict[str, int]]): The corresponding projected counts dictionary of dictionaries.
        nonId_pauli_label (str): The Pauli label to update expectations of, that should not contain identities.
            * Pauli expectations corresponding to I are inferred and updated from counts corresponding to strings containing Z instead.

    Returns:
        Dict[str, Dict[str, float]]: The updated Pauli expectations dictionary of dictionaries (projections -> {pauli string: expectation}).
    """
    # NOTE: counts are assumed to be ordered consistently with the Pauli basis
    # measurements that produced them (they are constructed that way upstream).
    for bit_string, counts in projected_counts.items():
        # Collect every label whose expectation this measurement basis determines.
        labels = [nonId_pauli_label]
        if nonId_pauli_label == "ZZ":
            labels.extend(["ZI", "IZ", "II"])
        if nonId_pauli_label[0] == "Z":
            labels.append(f"I{nonId_pauli_label[1]}")
        if nonId_pauli_label[1] == "Z":
            labels.append(f"{nonId_pauli_label[0]}I")

        pauli_expectations[bit_string].update(
            {label: get_Pauli_expectation(counts, label) for label in labels}
        )

    return pauli_expectations
534
+
535
+
536
def shadow_tomography_analysis(
    dataset: xr.Dataset,
    all_qubit_pairs_per_group: Dict[int, List[Tuple[int, int]]],
    all_qubit_neighbors_per_group: Dict[int, List[List[int]]],
    all_unprojected_qubits: Dict[int, List[int]],
    backend_name: str,
    execution_timestamp: str,
) -> Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
    """
    Performs shadow tomography analysis on the given dataset.

    For every qubit pair, local classical shadows are reconstructed per randomized
    measurement (RM), averaged per Median-of-Means (MoM) sample and per neighbor
    projection, and the projection maximizing the entanglement negativity is selected
    and plotted.

    Args:
        dataset (xr.Dataset): The dataset containing the experimental data.
        all_qubit_pairs_per_group (Dict[int, List[Tuple[int, int]]]): Dictionary mapping group indices to lists of qubit pairs.
        all_qubit_neighbors_per_group (Dict[int, List[List[int]]]): Dictionary mapping group indices to lists of neighbor qubit groups.
        all_unprojected_qubits (Dict[int, List[int]]): Dictionary mapping group indices to lists of unprojected qubits.
        backend_name (str): The name of the backend used for the experiment.
        execution_timestamp (str): The timestamp of the experiment execution.

    Returns:
        Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
            A tuple containing:
            - A dictionary of plots.
            - A list of benchmark observations.
            - A dictionary of maximum negativities.
            - The updated dataset.
    """
    plots: Dict[str, Any] = {}
    observations: List[BenchmarkObservation] = []
    max_negativities: Dict[str, Dict[str, str | float]] = {}

    execution_results = {}

    # Experiment parameters stored on the dataset by the execution phase.
    num_RMs = dataset.attrs["n_random_unitaries"]
    num_MoMs = dataset.attrs["n_median_of_means"]

    qcvv_logger.info("Fetching Clifford dictionary")
    clifford_1q_dict = import_native_gate_cliffords("1q")
    all_unitaries = dataset.attrs["all_unitaries"]

    shadows_per_projection: Dict[str, Dict[int, Dict[str, List[np.ndarray]]]] = {}
    # shadows_per_projection: qubit_pair -> MoMs -> {Projection, List of shadows}
    MoMs_shadows: Dict[str, Dict[str, np.ndarray]] = {}
    # MoMs_shadows: qubit_pair -> {Projection: MoMs shadow}
    average_shadows_per_projection: Dict[str, Dict[int, Dict[str, np.ndarray]]] = {}
    # average_shadows_per_projection: qubit_pair -> MoMs -> {Projection: shadows}
    all_negativities: Dict[str, Dict[int, Dict[str, float]]] = {}
    # all_negativities: qubit_pair -> MoMs -> {Projection: Negativity}
    MoMs_negativities: Dict[str, Dict[str, Dict[str, float]]] = {}
    for group_idx, group in all_qubit_pairs_per_group.items():
        qcvv_logger.info(f"Retrieving shadows for qubit-pair group {group_idx+1}/{len(all_qubit_pairs_per_group)}")
        # Assume only pairs and nearest-neighbors were measured, and each pair in the group uses num_RMs randomized measurements:
        execution_results[group_idx] = xrvariable_to_counts(
            dataset, str(all_unprojected_qubits[group_idx]), num_RMs * num_MoMs * len(group)
        )

        # Partition the flat list of counts first per pair (num_RMs * num_MoMs each),
        # then per MoM sample (num_RMs each).
        partitioned_counts_MoMs_RMs = split_sequence_in_chunks(execution_results[group_idx], num_RMs * num_MoMs)
        partitioned_counts_RMs = {}

        for pair_idx, qubit_pair in enumerate(group):
            all_negativities[str(qubit_pair)] = {}
            MoMs_negativities[str(qubit_pair)] = {}
            shadows_per_projection[str(qubit_pair)] = {}
            average_shadows_per_projection[str(qubit_pair)] = {}

            partitioned_counts_RMs[pair_idx] = split_sequence_in_chunks(partitioned_counts_MoMs_RMs[pair_idx], num_RMs)

            # Get the neighbor qubits of qubit_pair
            neighbor_qubits = all_qubit_neighbors_per_group[group_idx][pair_idx]
            neighbor_bit_strings_length = len(neighbor_qubits)
            # Generate all possible projection bitstrings for the neighbors, {'0','1'}^{\otimes{N}}
            all_projection_bit_strings = [
                "".join(x) for x in itertools.product(("0", "1"), repeat=neighbor_bit_strings_length)
            ]

            for MoMs in range(num_MoMs):
                qcvv_logger.info(
                    f"Now on qubit pair {qubit_pair} ({pair_idx+1}/{len(group)}) and median of means sample {MoMs+1}/{num_MoMs}"
                )

                # Get all shadows of qubit_pair
                shadows_per_projection[str(qubit_pair)][MoMs] = {
                    projection: [] for projection in all_projection_bit_strings
                }
                for RM_idx, counts in enumerate(partitioned_counts_RMs[pair_idx][MoMs]):
                    # Retrieve both Cliffords (i.e. for each qubit)
                    cliffords_rm = [all_unitaries[group_idx][MoMs][str(q)][RM_idx] for q in qubit_pair]
                    # Organize counts by projection
                    # e.g. counts ~ {'000 00': 31, '000 01': 31, '000 10': 38, '000 11': 41, '001 00': 28, '001 01': 33,
                    # '001 10': 31, '001 11': 37, '010 00': 29, '010 01': 32, '010 10': 31, '010 11': 25,
                    # '011 00': 36, '011 01': 24, '011 10': 33, '011 11': 32, '100 00': 22, '100 01': 38,
                    # '100 10': 34, '100 11': 26, '101 00': 26, '101 01': 26, '101 10': 37, '101 11': 30,
                    # '110 00': 36, '110 01': 35, '110 10': 31, '110 11': 35, '111 00': 31, '111 01': 32,
                    # '111 10': 37, '111 11': 36}
                    # organize to projected_counts['000'] ~ {'00': 31, '01': 31, '10': 38, '11': 41},
                    # projected_counts['001'] ~ {'00': 28, '01': 33, '10': 31, '11': 37}
                    # ...
                    projected_counts = {
                        projection: {
                            b_s[-2:]: b_c
                            for b_s, b_c in counts.items()
                            if b_s[:neighbor_bit_strings_length] == projection
                        }
                        for projection in all_projection_bit_strings
                    }

                    # Get the individual shadow for each projection
                    for projected_bit_string in all_projection_bit_strings:
                        shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string].append(
                            get_local_shadow(
                                counts=projected_counts[projected_bit_string],
                                unitary_arg=cliffords_rm,
                                subsystem_bit_indices=list(range(2)),
                                clifford_or_haar="clifford",
                                cliffords_1q=clifford_1q_dict,
                            )
                        )

                # Average the shadows for each projection and MoMs sample
                average_shadows_per_projection[str(qubit_pair)][MoMs] = {
                    projected_bit_string: np.mean(
                        shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string], axis=0
                    )
                    for projected_bit_string in all_projection_bit_strings
                }

                # Compute the negativity of the shadow of each projection
                qcvv_logger.info(
                    f"Computing the negativity of all shadow projections for qubit pair {qubit_pair} ({pair_idx+1}/{len(group)}) and median of means sample {MoMs+1}/{num_MoMs}"
                )
                all_negativities[str(qubit_pair)][MoMs] = {
                    projected_bit_string: get_negativity(
                        average_shadows_per_projection[str(qubit_pair)][MoMs][projected_bit_string], 1, 1
                    )
                    for projected_bit_string in all_projection_bit_strings
                }

            # Median (with uncertainty) over MoM samples, per projection.
            MoMs_negativities[str(qubit_pair)] = {
                projected_bit_string: median_with_uncertainty(
                    [all_negativities[str(qubit_pair)][m][projected_bit_string] for m in range(num_MoMs)]
                )
                for projected_bit_string in all_projection_bit_strings
            }

            # Element-wise median of the averaged shadows over MoM samples.
            MoMs_shadows[str(qubit_pair)] = {
                projected_bit_string: np.median(
                    [average_shadows_per_projection[str(qubit_pair)][m][projected_bit_string] for m in range(num_MoMs)],
                    axis=0,
                )
                for projected_bit_string in all_projection_bit_strings
            }

            all_negativities_list = [
                MoMs_negativities[str(qubit_pair)][projected_bit_string]["value"]
                for projected_bit_string in all_projection_bit_strings
            ]
            all_negativities_uncertainty = [
                MoMs_negativities[str(qubit_pair)][projected_bit_string]["uncertainty"]
                for projected_bit_string in all_projection_bit_strings
            ]

            # Index of the projection with the largest median negativity.
            max_negativity_projection = np.argmax(all_negativities_list)

            max_negativity = {
                "value": all_negativities_list[max_negativity_projection],
                "uncertainty": all_negativities_uncertainty[max_negativity_projection],
            }

            max_negativities[str(qubit_pair)] = {}  # {str(qubit_pair): {"negativity": float, "projection": str}}
            max_negativities[str(qubit_pair)].update(
                {
                    "projection": all_projection_bit_strings[max_negativity_projection],
                }
            )
            max_negativities[str(qubit_pair)].update(max_negativity)

            # Plot the MoM-median shadow of the best projection for this pair.
            fig_name, fig = plot_density_matrix(
                matrix=MoMs_shadows[str(qubit_pair)][all_projection_bit_strings[max_negativity_projection]],
                qubit_pair=qubit_pair,
                projection=all_projection_bit_strings[max_negativity_projection],
                negativity=max_negativity,
                backend_name=backend_name,
                timestamp=execution_timestamp,
                tomography="shadow_tomography",
                num_RM_samples=num_RMs,
                num_MoMs_samples=num_MoMs,
            )
            plots[fig_name] = fig

            observations.extend(
                [
                    BenchmarkObservation(
                        name="max_negativity",
                        value=max_negativity["value"],
                        uncertainty=max_negativity["uncertainty"],
                        identifier=BenchmarkObservationIdentifier(qubit_pair),
                    )
                ]
            )

    # Persist intermediate results on the dataset for downstream inspection.
    dataset.attrs.update(
        {
            "median_of_means_shadows": MoMs_shadows,
            "median_of_means_negativities": MoMs_negativities,
            "all_negativities": all_negativities,
            "all_shadows": shadows_per_projection,
        }
    )

    return plots, observations, max_negativities, dataset
746
+
747
+
748
def state_tomography_analysis(
    dataset: xr.Dataset,
    all_qubit_pairs_per_group: Dict[int, List[Tuple[int, int]]],
    all_qubit_neighbors_per_group: Dict[int, List[List[int]]],
    all_unprojected_qubits: Dict[int, List[int]],
    backend_name: str,
    execution_timestamp: str,
) -> Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, str | float]], xr.Dataset]:
    """
    Performs state tomography analysis on the given dataset.

    For every qubit pair, the stored measurement counts (one batch per Pauli
    measurement setting) are split by the projection bitstring observed on the
    pair's neighbor qubits, Pauli expectations are accumulated per projection,
    a two-qubit density matrix is reconstructed for each projection, and the
    projection with the maximal entanglement negativity is reported.
    Uncertainties for the maximal negativity are estimated by bootstrapping
    the counts of the winning projection only.

    Args:
        dataset (xr.Dataset): The dataset containing the experimental data.
        all_qubit_pairs_per_group (Dict[int, List[Tuple[int, int]]]): Dictionary mapping group indices to lists of qubit pairs.
        all_qubit_neighbors_per_group (Dict[int, List[List[int]]]): Dictionary mapping group indices to lists of neighbor qubit groups.
        all_unprojected_qubits (Dict[int, List[int]]): Dictionary mapping group indices to lists of unprojected qubits.
        backend_name (str): The name of the backend used for the experiment.
        execution_timestamp (str): The timestamp of the experiment execution.

    Returns:
        Tuple[Dict[str, Any], List[BenchmarkObservation], Dict[str, Dict[str, float]], xr.Dataset]:
            A tuple containing:
            - A dictionary of plots.
            - A list of benchmark observations.
            - A dictionary of maximum negativities.
            - The updated dataset.
    """
    plots: Dict[str, Any] = {}
    observations: List[BenchmarkObservation] = []
    max_negativities: Dict[str, Dict[str, str | float]] = {}

    execution_results = {}

    num_bootstraps = dataset.attrs["num_bootstraps"]

    tomography_state: Dict[int, Dict[str, Dict[str, np.ndarray]]] = {}
    # tomography_state: group_idx -> qubit_pair -> {projection: numpy array (reconstructed 2-qubit density matrix)}
    bootstrapped_states: Dict[int, Dict[str, List[np.ndarray]]] = {}
    # bootstrapped_states: group_idx -> qubit_pair -> List of bootstrapped states for max_neg_projection
    tomography_negativities: Dict[int, Dict[str, Dict[str, float]]] = {}
    bootstrapped_negativities: Dict[int, Dict[str, List[float]]] = {}
    bootstrapped_avg_negativities: Dict[int, Dict[str, Dict[str, float]]] = {}
    num_tomo_samples = 3**2  # In general 3**n samples suffice (assuming trace-preservation and unitality for the Pauli measurements)
    for group_idx, group in all_qubit_pairs_per_group.items():
        qcvv_logger.info(
            f"Retrieving tomography-reconstructed states with {num_bootstraps} for qubit-pair group {group_idx+1}/{len(all_qubit_pairs_per_group)}"
        )

        # Assume only pairs and nearest-neighbors were measured; each pair in the
        # group contributed num_tomo_samples (= 9) Pauli-setting circuits:
        execution_results[group_idx] = xrvariable_to_counts(
            dataset, str(all_unprojected_qubits[group_idx]), num_tomo_samples * len(group)
        )

        tomography_state[group_idx] = {}
        bootstrapped_states[group_idx] = {}
        tomography_negativities[group_idx] = {}
        bootstrapped_negativities[group_idx] = {}
        bootstrapped_avg_negativities[group_idx] = {}

        # Re-chunk the flat list of counts so that partitioned_counts[pair_idx]
        # holds the num_tomo_samples count dictionaries of that pair.
        partitioned_counts = split_sequence_in_chunks(execution_results[group_idx], num_tomo_samples)

        for pair_idx, qubit_pair in enumerate(group):
            # Get the neighbor qubits of qubit_pair
            neighbor_qubits = all_qubit_neighbors_per_group[group_idx][pair_idx]
            neighbor_bit_strings_length = len(neighbor_qubits)
            # Generate all possible projection bitstrings for the neighbors, {'0','1'}^{\otimes{N}}
            all_projection_bit_strings = [
                "".join(x) for x in itertools.product(("0", "1"), repeat=neighbor_bit_strings_length)
            ]

            # The 9 two-qubit Pauli labels (no identity factors): ZZ, ZX, ..., YY,
            # in the same order the tomography circuits were generated.
            sqg_pauli_strings = ("Z", "X", "Y")
            all_nonId_pauli_labels = ["".join(x) for x in itertools.product(sqg_pauli_strings, repeat=2)]

            pauli_expectations: Dict[str, Dict[str, float]] = {
                projection: {} for projection in all_projection_bit_strings
            }
            # pauli_expectations: projected_bit_string -> pauli string -> float expectation
            for pauli_idx, counts in enumerate(partitioned_counts[pair_idx]):
                # Split each counts dict by the neighbor projection outcome
                # (leading bits) and keep only the pair's bits (last two).
                # NOTE(review): assumes bitstrings are ordered neighbors-first,
                # pair-last — confirm against the circuit's measurement order.
                projected_counts = {
                    projection: {
                        b_s[-2:]: b_c for b_s, b_c in counts.items() if b_s[:neighbor_bit_strings_length] == projection
                    }
                    for projection in all_projection_bit_strings
                    if projection in [c[:neighbor_bit_strings_length] for c in counts.keys()]
                }

                pauli_expectations = update_pauli_expectations(
                    pauli_expectations,
                    projected_counts,
                    nonId_pauli_label=all_nonId_pauli_labels[pauli_idx],
                )

            # Remove projections with empty values for pauli_expectations
            # This will happen if certain projection bitstrings were just not measured
            pauli_expectations = {
                projection: expectations for projection, expectations in pauli_expectations.items() if expectations
            }

            # Reconstruct one density matrix per surviving projection bitstring.
            tomography_state[group_idx][str(qubit_pair)] = {
                projection: get_tomography_matrix(pauli_expectations=pauli_expectations[projection])
                for projection in pauli_expectations.keys()
            }

            # Negativity of each reconstructed state; the (1, 1) arguments are
            # presumably the bipartition sizes (one qubit each) — confirm
            # against get_negativity's signature.
            tomography_negativities[group_idx][str(qubit_pair)] = {
                projected_bit_string: get_negativity(
                    tomography_state[group_idx][str(qubit_pair)][projected_bit_string], 1, 1
                )
                for projected_bit_string in pauli_expectations.keys()
            }

            # Extract the max negativity and the corresponding projection - save in dictionary
            all_negativities_list = [
                tomography_negativities[group_idx][str(qubit_pair)][projected_bit_string]
                for projected_bit_string in pauli_expectations.keys()
            ]

            max_negativity_projection_idx = np.argmax(all_negativities_list)
            max_negativity_bitstring = list(pauli_expectations.keys())[max_negativity_projection_idx]

            # Bootstrapping - do only for max projection bitstring
            bootstrapped_pauli_expectations: List[Dict[str, Dict[str, float]]] = [
                {max_negativity_bitstring: {}} for _ in range(num_bootstraps)
            ]
            for pauli_idx, counts in enumerate(partitioned_counts[pair_idx]):
                # Keep only shots where the neighbors collapsed onto the
                # winning projection, then resample those counts.
                projected_counts = {
                    b_s[-2:]: b_c
                    for b_s, b_c in counts.items()
                    if b_s[:neighbor_bit_strings_length] == max_negativity_bitstring
                }
                all_bootstrapped_counts = bootstrap_counts(
                    projected_counts, num_bootstraps, include_original_counts=True
                )
                for bootstrap in range(num_bootstraps):
                    bootstrapped_pauli_expectations[bootstrap] = update_pauli_expectations(
                        bootstrapped_pauli_expectations[bootstrap],
                        projected_counts={max_negativity_bitstring: all_bootstrapped_counts[bootstrap]},
                        nonId_pauli_label=all_nonId_pauli_labels[pauli_idx],
                    )

            # One reconstructed state per bootstrap resample.
            bootstrapped_states[group_idx][str(qubit_pair)] = [
                get_tomography_matrix(
                    pauli_expectations=bootstrapped_pauli_expectations[bootstrap][max_negativity_bitstring]
                )
                for bootstrap in range(num_bootstraps)
            ]

            bootstrapped_negativities[group_idx][str(qubit_pair)] = [
                get_negativity(bootstrapped_states[group_idx][str(qubit_pair)][bootstrap], 1, 1)
                for bootstrap in range(num_bootstraps)
            ]

            # Bootstrap mean/std serve as the central value and uncertainty.
            bootstrapped_avg_negativities[group_idx][str(qubit_pair)] = {
                "value": float(np.mean(bootstrapped_negativities[group_idx][str(qubit_pair)])),
                "uncertainty": float(np.std(bootstrapped_negativities[group_idx][str(qubit_pair)])),
            }

            # Reported value is the point estimate from the original counts;
            # the bootstrapped average is kept alongside for reference.
            max_negativity = {
                "value": all_negativities_list[max_negativity_projection_idx],
                "bootstrapped_average": bootstrapped_avg_negativities[group_idx][str(qubit_pair)]["value"],
                "uncertainty": bootstrapped_avg_negativities[group_idx][str(qubit_pair)]["uncertainty"],
            }

            max_negativities[str(qubit_pair)] = {}  # {str(qubit_pair): {"negativity": float, "projection": str}}
            max_negativities[str(qubit_pair)].update(
                {
                    "projection": max_negativity_bitstring,
                }
            )
            max_negativities[str(qubit_pair)].update(max_negativity)

            # Plot the density matrix of the winning projection.
            fig_name, fig = plot_density_matrix(
                matrix=tomography_state[group_idx][str(qubit_pair)][max_negativity_bitstring],
                qubit_pair=qubit_pair,
                projection=max_negativity_bitstring,
                negativity=max_negativity,
                backend_name=backend_name,
                timestamp=execution_timestamp,
                tomography="state_tomography",
            )
            plots[fig_name] = fig

            observations.extend(
                [
                    BenchmarkObservation(
                        name="max_negativity",
                        value=max_negativity["value"],
                        uncertainty=max_negativity["uncertainty"],
                        identifier=BenchmarkObservationIdentifier(qubit_pair),
                    )
                ]
            )

    dataset.attrs.update(
        {
            "all_tomography_states": tomography_state,
            "all_negativities": tomography_negativities,
        }
    )

    return plots, observations, max_negativities, dataset
948
+
949
+
950
def negativity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
    """Analysis function for a Graph State benchmark experiment.

    Dispatches to the shadow- or state-tomography analysis routine according
    to the ``tomography`` attribute stored in the dataset, records the
    per-pair maximal negativities, and builds the two summary plots.

    Args:
        run (BenchmarkRunResult): The benchmark run whose dataset is analyzed.

    Returns:
        BenchmarkAnalysisResult: Result bundling the dataset, plots and observations.
    """
    qcvv_logger.info("Fetching dataset")
    dataset = run.dataset.copy(deep=True)
    qcvv_logger.info("Dataset imported OK")

    # Pull all execution metadata needed downstream out of the dataset attrs.
    attrs = dataset.attrs
    backend_name = attrs["backend_name"]
    coupling_map = attrs["coupling_map"]
    qubit_names = attrs["qubit_names"]
    execution_timestamp = attrs["execution_timestamp"]
    tomography = attrs["tomography"]
    n_bootstraps = attrs["num_bootstraps"]
    n_rm_samples = attrs["n_random_unitaries"]
    n_moms_samples = attrs["n_median_of_means"]
    n_shots = attrs["shots"]

    pair_groups = attrs["all_pair_groups"]
    neighbor_groups = attrs["all_neighbor_groups"]
    unprojected_qubits = attrs["all_unprojected_qubits"]

    # Both analysis routines share the same signature, so select one and call it once.
    analysis_routine = (
        shadow_tomography_analysis if tomography == "shadow_tomography" else state_tomography_analysis
    )
    plots, observations, max_negativities, dataset = analysis_routine(
        dataset,
        pair_groups,
        neighbor_groups,
        unprojected_qubits,
        backend_name,
        execution_timestamp,
    )

    dataset.attrs.update({"max_negativities": max_negativities})

    # Summary plot: maximal negativity per qubit pair.
    fig_name, fig = plot_max_negativities(
        negativities=max_negativities,
        backend_name=backend_name,
        qubit_names=qubit_names,
        timestamp=execution_timestamp,
        tomography=tomography,
        num_shots=n_shots,
        num_bootstraps=n_bootstraps,
        num_RM_samples=n_rm_samples,
        num_MoMs_samples=n_moms_samples,
    )
    plots[fig_name] = fig

    # Summary plot: same negativities drawn on the device coupling graph.
    fig_name, fig = plot_max_negativities_graph(
        negativities=max_negativities,
        backend_coupling_map=coupling_map,
        qubit_names=qubit_names,
        timestamp=execution_timestamp,
        tomography=tomography,
        num_shots=n_shots,
        num_bootstraps=n_bootstraps,
        num_RM_samples=n_rm_samples,
        num_MoMs_samples=n_moms_samples,
    )
    plots[fig_name] = fig

    qcvv_logger.info("Analysis of Graph State Benchmark experiment concluded!")

    return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
1019
+
1020
+
1021
class GraphStateBenchmark(Benchmark):
    """The Graph States benchmark estimates the bipartite entanglement negativity of native graph states."""

    # Analysis entry point invoked by the framework after execution.
    analysis_function = staticmethod(negativity_analysis)
    name = "graph_states"

    def __init__(self, backend_arg: IQMBackendBase, configuration: "GraphStateConfiguration"):
        """Construct the GraphStateBenchmark class.

        Args:
            backend_arg (IQMBackendBase): the backend to execute the benchmark on
            configuration (GraphStateConfiguration): the configuration of the benchmark
        """
        super().__init__(backend_arg, configuration)

        # Keep a printable backend identifier whether a name or a backend object was given.
        self.backend_configuration_name = backend_arg if isinstance(backend_arg, str) else backend_arg.name

        self.qubits = configuration.qubits
        self.tomography = configuration.tomography

        self.num_bootstraps = configuration.num_bootstraps
        self.n_random_unitaries = configuration.n_random_unitaries
        self.n_median_of_means = configuration.n_median_of_means

        # Initialize relevant variables for the benchmark:
        # the native graph-state circuit on the chosen qubits and the reduced coupling map.
        self.graph_state_circuit = generate_graph_state(self.qubits, self.backend)
        self.coupling_map = set_coupling_map(self.qubits, self.backend, physical_layout="fixed")

        # Initialize the variable to contain the benchmark circuits of each layout
        self.circuits = Circuits()
        self.untranspiled_circuits = BenchmarkCircuit(name="untranspiled_circuits")
        self.transpiled_circuits = BenchmarkCircuit(name="transpiled_circuits")

        self.session_timestamp = strftime("%Y%m%d-%H%M%S")
        # Set at the start of execute(); empty until then.
        self.execution_timestamp = ""

    def add_all_meta_to_dataset(self, dataset: xr.Dataset):
        """Adds all configuration metadata and circuits to the dataset variable

        Args:
            dataset (xr.Dataset): The xarray dataset
        """
        dataset.attrs["session_timestamp"] = self.session_timestamp
        dataset.attrs["execution_timestamp"] = self.execution_timestamp
        dataset.attrs["backend_configuration_name"] = self.backend_configuration_name
        dataset.attrs["backend_name"] = self.backend.name
        dataset.attrs["qubit_names"] = {qubit: self.backend.index_to_qubit_name(qubit) for qubit in self.qubits}
        dataset.attrs["coupling_map"] = self.coupling_map

        # Copy every configuration field into the attrs, replacing the
        # benchmark class object by its name to keep the dataset serializable.
        for key, value in self.configuration:
            if key == "benchmark":  # Avoid saving the class object
                dataset.attrs[key] = value.name
            else:
                dataset.attrs[key] = value
        # Defined outside configuration - if any

    @timeit
    def add_all_circuits_to_dataset(self, dataset: xr.Dataset):
        """Adds all generated circuits during execution to the dataset variable

        Both the transpiled and untranspiled circuit collections are stored
        as nested string-keyed dictionaries under the corresponding attrs key.

        Args:
            dataset (xr.Dataset): The xarray dataset

        Returns:
            None (the @timeit decorator presumably wraps this into a
            (result, elapsed-time) pair — confirm against timeit's definition).
        """
        qcvv_logger.info(f"Adding all circuits to the dataset")
        for key, circuit in zip(
            ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
        ):
            # Stringify all keys so the nested structure survives dataset serialization.
            dictionary = {}
            for outer_key, outer_value in circuit.items():
                dictionary[str(outer_key)] = {
                    str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
                }
            dataset.attrs[key] = dictionary

    @timeit
    def generate_all_circuit_info_for_graph_state_benchmark(self) -> Dict[str, Any]:
        """
        Generates all circuits and associated information for the Graph State benchmark:
        - Generates native graph states
        - Identifies all pairs of qubits with disjoint neighbors
        - Generates all projected nodes to cover all pairs of qubits with disjoint neighbors

        Returns:
            Dict[str, Any]: A dictionary containing all circuit information for the Graph State benchmark.

        """
        # Map virtual circuit indices back to the requested physical qubits.
        layout_mapping = {
            a._index: b  # pylint: disable=W0212
            for a, b in self.graph_state_circuit.layout.initial_layout.get_virtual_bits().items()
            if b in self.qubits
        }

        # Get unique list of edges - Use layout_mapping to determine the connections between phyical qubits
        graph_edges = [
            (layout_mapping[e[0]], layout_mapping[e[1]])
            for e in list(self.coupling_map.graph.to_undirected(multigraph=False).edge_list())
        ]

        # Find pairs of nodes with disjoint neighbors
        # {idx: [(q1,q2), (q3,q4), ...]}
        pair_groups = find_edges_with_disjoint_neighbors(graph_edges)
        # {idx: [(n11,n12,n13,...), (n21,n22,n23,...), ...]}
        neighbor_groups = {
            idx: [get_neighbors_of_edges([y], graph_edges) for y in x] for idx, x in enumerate(pair_groups)
        }

        # Get all projected nodes to cover all pairs of qubits with disjoint neighbours
        # {idx: [q1,q2,q3,q4, ...]}
        unmeasured_qubit_indices = {idx: [a for b in x for a in b] for idx, x in enumerate(pair_groups)}
        # {idx: [n11,n12,n13,...,n21,n22,n23, ...]}
        projected_nodes = {idx: get_neighbors_of_edges(list(x), graph_edges) for idx, x in enumerate(pair_groups)}

        # Generate copies of circuits to add projections and randomized measurements
        grouped_graph_circuits = {idx: self.graph_state_circuit.copy() for idx in projected_nodes.keys()}

        return {
            "grouped_graph_circuits": grouped_graph_circuits,
            "unmeasured_qubit_indices": unmeasured_qubit_indices,
            "projected_nodes": projected_nodes,
            "pair_groups": dict(enumerate(pair_groups)),
            "neighbor_groups": neighbor_groups,
        }

    def execute(self, backend) -> xr.Dataset:  # pylint: disable=too-many-statements
        """
        Executes the benchmark.

        Builds the tomography circuits (shadow or state tomography, per
        configuration) for every qubit-pair group, submits one batch per
        group, retrieves all counts, and stores circuits, counts and timing
        metadata in the returned dataset.
        """
        self.execution_timestamp = strftime("%Y%m%d-%H%M%S")

        dataset = xr.Dataset()
        self.add_all_meta_to_dataset(dataset)

        # Routine to generate all
        qcvv_logger.info(f"Identifying qubit pairs and neighbor groups for the Graph State benchmark")
        graph_benchmark_circuit_info, time_circuit_generation = (
            self.generate_all_circuit_info_for_graph_state_benchmark()
        )
        dataset.attrs.update({"time_circuit_generation": time_circuit_generation})

        # pylint: disable=invalid-sequence-index
        grouped_graph_circuits: Dict[int, QuantumCircuit] = graph_benchmark_circuit_info["grouped_graph_circuits"]
        unprojected_qubits = graph_benchmark_circuit_info["unmeasured_qubit_indices"]
        neighbor_qubits = graph_benchmark_circuit_info["projected_nodes"]
        pair_groups = graph_benchmark_circuit_info["pair_groups"]
        neighbor_groups = graph_benchmark_circuit_info["neighbor_groups"]
        # pylint: enable=invalid-sequence-index

        dataset.attrs.update(
            {
                "all_unprojected_qubits": unprojected_qubits,
                "all_projected_qubits": neighbor_qubits,
                "all_pair_groups": pair_groups,
                "all_neighbor_groups": neighbor_groups,
            }
        )

        circuits_untranspiled: Dict[int, List[QuantumCircuit]] = {}
        circuits_transpiled: Dict[int, List[QuantumCircuit]] = {}

        time_circuits = {}
        time_transpilation = {}
        all_graph_submit_results = []

        # The 1Q Clifford dictionary is only needed for shadow tomography.
        if self.tomography == "shadow_tomography":
            clifford_1q_dict = import_native_gate_cliffords("1q")

        qcvv_logger.info(f"Performing {self.tomography.replace('_',' ')} of all qubit pairs")

        all_unitaries: Dict[int, Dict[int, Dict[str, List[str]]]] = {}
        # all_unitaries: group_idx -> MoMs -> projection -> List[Clifford labels]
        # Will be empty if state_tomography -> assign Clifford labels in analysis
        for idx, circuit in grouped_graph_circuits.items():
            # It is not clear now that grouping is needed,
            # since it seems like pairs must be measured one at a time
            # (marginalizing any other qubits gives maximally mixed states)
            # however, the same structure is used in case this can still somehow be parallelized
            qcvv_logger.info(f"Now on group {idx + 1}/{len(grouped_graph_circuits)}")
            if self.tomography == "shadow_tomography":
                # Outer loop for each mean to be considered for Median of Means (MoMs) estimators
                all_unitaries[idx] = {m: {} for m in range(self.n_median_of_means)}
                circuits_untranspiled[idx] = []
                circuits_transpiled[idx] = []
                time_circuits[idx] = 0
                time_transpilation[idx] = 0
                for qubit_pair, neighbors in zip(pair_groups[idx], neighbor_groups[idx]):
                    RM_circuits_untranspiled_MoMs = []
                    RM_circuits_transpiled_MoMs = []
                    time_circuits_MoMs = 0
                    for MoMs in range(self.n_median_of_means):
                        # Go though each pair and only project neighbors
                        # all_unitaries[idx][MoMs] = {}
                        qcvv_logger.info(
                            f"Now on qubit pair {qubit_pair} and neighbors {neighbors} for Median of Means sample {MoMs + 1}/{self.n_median_of_means}"
                        )
                        (unitaries_single_pair, rm_circuits_untranspiled_single_pair), time_rm_circuits_single_pair = (
                            local_shadow_tomography(
                                qc=circuit,
                                Nu=self.n_random_unitaries,
                                active_qubits=qubit_pair,
                                measure_other=neighbors,
                                measure_other_name="neighbors",
                                clifford_or_haar="clifford",
                                cliffords_1q=clifford_1q_dict,
                            )
                        )

                        all_unitaries[idx][MoMs].update(unitaries_single_pair)
                        RM_circuits_untranspiled_MoMs.extend(rm_circuits_untranspiled_single_pair)
                        # When using a Clifford dictionary, both the graph state and the RMs are generated natively
                        RM_circuits_transpiled_MoMs.extend(rm_circuits_untranspiled_single_pair)
                        time_circuits_MoMs += time_rm_circuits_single_pair

                        self.transpiled_circuits.circuit_groups.append(
                            CircuitGroup(name=str(qubit_pair), circuits=rm_circuits_untranspiled_single_pair)
                        )

                    time_circuits[idx] += time_circuits_MoMs
                    circuits_untranspiled[idx].extend(RM_circuits_untranspiled_MoMs)
                    circuits_transpiled[idx].extend(RM_circuits_transpiled_MoMs)

                dataset.attrs.update({"all_unitaries": all_unitaries})
            else:  # if self.tomography == "state_tomography" (default)
                circuits_untranspiled[idx] = []
                circuits_transpiled[idx] = []
                time_circuits[idx] = 0
                time_transpilation[idx] = 0
                for qubit_pair, neighbors in zip(pair_groups[idx], neighbor_groups[idx]):
                    qcvv_logger.info(f"Now on qubit pair {qubit_pair} and neighbors {neighbors}")
                    state_tomography_circuits, time_state_tomo_circuits_single_pair = (
                        generate_state_tomography_circuits(
                            qc=circuit,
                            active_qubits=qubit_pair,
                            measure_other=neighbors,
                            measure_other_name="neighbors",
                            native=True,
                        )
                    )

                    self.transpiled_circuits.circuit_groups.append(
                        CircuitGroup(
                            name=str(qubit_pair), circuits=list(cast(dict, state_tomography_circuits).values())
                        )
                    )
                    time_circuits[idx] += time_state_tomo_circuits_single_pair
                    circuits_untranspiled[idx].extend(cast(dict, state_tomography_circuits).values())
                    # When using a native gates in tomo step, both the graph state and the RMs are generated natively
                    circuits_transpiled[idx].extend(cast(dict, state_tomography_circuits).values())

            # Submit for execution in backend - submit all per pair group, irrespective of tomography procedure.
            # A whole group is considered as a single batch.
            # Jobs will only be split in separate submissions if there are batch size limitations (retrieval will occur per batch).
            # It shouldn't be a problem [anymore] that different qubits are being measured in a single batch.
            # Post-processing will take care of separating MoMs samples and identifying all unitary (Clifford) labels.
            sorted_transpiled_qc_list = {tuple(unprojected_qubits[idx]): circuits_transpiled[idx]}
            graph_jobs, time_submit = submit_execute(
                sorted_transpiled_qc_list, backend, self.shots, self.calset_id, self.max_gates_per_batch
            )

            all_graph_submit_results.append(
                {
                    "unprojected_qubits": unprojected_qubits[idx],
                    "neighbor_qubits": neighbor_qubits[idx],
                    "jobs": graph_jobs,
                    "time_submit": time_submit,
                }
            )

        # Retrieve all counts and add to dataset
        for job_idx, job_dict in enumerate(all_graph_submit_results):
            unprojected_qubits = job_dict["unprojected_qubits"]
            # Retrieve counts
            execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier=str(unprojected_qubits))

            # Retrieve all job meta data
            all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])

            # Export all to dataset
            dataset.attrs.update(
                {
                    job_idx: {
                        "time_circuits": time_circuits[job_idx],
                        "time_transpilation": time_transpilation[job_idx],
                        "time_submit": job_dict["time_submit"],
                        "time_retrieve": time_retrieve,
                        "all_job_metadata": all_job_metadata,
                    }
                }
            )

            qcvv_logger.info(f"Adding counts of qubit pairs {unprojected_qubits} to the dataset")
            dataset, _ = add_counts_to_dataset(execution_results, str(unprojected_qubits), dataset)

        self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])

        # if self.rem: TODO: add REM functionality

        qcvv_logger.info(f"Graph State benchmark experiment execution concluded !")

        return dataset
1323
+
1324
+
1325
class GraphStateConfiguration(BenchmarkConfigurationBase):
    """Graph States Benchmark configuration.

    Attributes:
        benchmark (Type[Benchmark]): GraphStateBenchmark
        qubits (Sequence[int]): The physical qubit layout in which to benchmark graph state generation.
        tomography (Literal["state_tomography", "shadow_tomography"]): Whether to use state or shadow tomography.
            * Default is "state_tomography".
        num_bootstraps (int): The amount of bootstrap samples to use with state tomography.
            * Default is 50.
        n_random_unitaries (int): The number of random single-qubit unitaries to use for (local) shadow tomography
            (drawn from the single-qubit Clifford group during execution).
            * Default is 100.
        n_median_of_means (int): The number of mean samples over n_random_unitaries to generate a median of means estimator for shadow tomography.
            * NB: The total amount of execution calls will be a multiplicative factor of n_random_unitaries x n_median_of_means.
            * Default is 1 (no median of means).

    """

    benchmark: Type[Benchmark] = GraphStateBenchmark
    qubits: Sequence[int]
    tomography: Literal["state_tomography", "shadow_tomography"] = "state_tomography"
    num_bootstraps: int = 50
    n_random_unitaries: int = 100
    n_median_of_means: int = 1