iqm-benchmarks 2.44__py3-none-any.whl → 2.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,17 +3,20 @@ Data analysis code for compressive gate set tomography
3
3
  """
4
4
 
5
5
  import ast
6
- from itertools import product
6
+ import multiprocessing as mp
7
7
  from time import perf_counter
8
8
  from typing import Any, List, Tuple, Union
9
9
 
10
10
  from matplotlib.figure import Figure
11
11
  import matplotlib.pyplot as plt
12
- from matplotlib.transforms import Bbox
13
12
  from numpy import ndarray
14
13
  import numpy as np
15
14
  from pandas import DataFrame
15
+ import pandas as pd
16
+ import psutil
16
17
  from pygsti.models.model import Model
18
+ from tqdm import tqdm, trange
19
+ from tqdm.contrib.logging import logging_redirect_tqdm
17
20
  import xarray as xr
18
21
 
19
22
  from iqm.benchmarks.benchmark_definition import (
@@ -22,59 +25,86 @@ from iqm.benchmarks.benchmark_definition import (
22
25
  BenchmarkObservationIdentifier,
23
26
  BenchmarkRunResult,
24
27
  )
28
+ from iqm.benchmarks.logging_config import qcvv_logger
25
29
  from mGST import additional_fns, algorithm, compatibility
26
30
  from mGST.low_level_jit import contract
27
31
  from mGST.qiskit_interface import qiskit_gate_to_operator
28
32
  from mGST.reporting import figure_gen, reporting
29
33
 
30
34
 
31
- def dataframe_to_figure(
32
- df: DataFrame, row_labels: Union[List[str], None] = None, col_width: float = 2, fontsize: int = 12
33
- ) -> Figure:
34
- """Turns a pandas DataFrame into a figure
35
- This is needed to conform with the standard file saving routine of QCVV.
35
+ def process_bootstrap_samples(
36
+ y_sampled: ndarray, attrs: dict[str, Any], init: list[ndarray], target_mdl: Model, identifier: str
37
+ ) -> tuple[ndarray, ndarray, ndarray, ndarray, ndarray, bool]:
38
+ """Process a single bootstrap sample for Gate Set Tomography.
39
+
40
+ This function performs a GST analysis on a sampled dataset, applies gauge optimization,
41
+ and generates result reports.
36
42
 
37
43
  Args:
38
- df: Pandas DataFrame
39
- A dataframe table containing GST results
40
- row_labels: List[str]
41
- The row labels for the dataframe
42
- col_width: int
43
- Used to control cell width in the table
44
- fontsize: int
45
- Font size of text/numbers in table cells
44
+ y_sampled: ndarray
45
+ A 2D array of measurement outcomes for sequences in J;
46
+ Each column contains the outcome probabilities for a fixed sequence
47
+ attrs: dict[str, Any]
48
+ Dictionary containing configuration parameters for the GST algorithm
49
+ init: list[ndarray]
50
+ Initial values for the gate set optimization [K, E, rho]
51
+ target_mdl: Model
52
+ The target gate set model
53
+ identifier: str
54
+ String identifier for the current qubit layout
46
55
 
47
56
  Returns:
48
- figure: Matplotlib figure object
49
- A figure representing the dataframe.
57
+ X_opt_pp: ndarray
58
+ Array of optimized gate tensors in Pauli basis
59
+ E_opt_pp: ndarray
60
+ Optimized POVM elements in Pauli basis
61
+ rho_opt_pp: ndarray
62
+ Optimized initial state in Pauli basis
63
+ df_g.values: ndarray
64
+ Array of gate quality measures
65
+ df_o.values: ndarray
66
+ Array of SPAM and other quality measures
67
+ opt_success: bool
68
+ Whether the optimization successfully converged below expected least-squares error
50
69
  """
51
-
52
- if row_labels is None:
53
- row_labels = list(np.arange(df.shape[0]))
54
-
55
- row_height = fontsize / 70 * 2
56
- n_cols = df.shape[1]
57
- n_rows = df.shape[0]
58
- figsize = np.array([n_cols + 1, n_rows + 1]) * np.array([col_width, row_height])
59
-
60
- fig, ax = plt.subplots(figsize=figsize)
61
-
62
- fig.patch.set_visible(False)
63
- ax.axis("off")
64
- ax.axis("tight")
65
- data_array = (df.to_numpy(dtype="str")).copy()
66
- column_names = df.columns.tolist()
67
- table = ax.table(
68
- cellText=data_array,
69
- colLabels=column_names,
70
- rowLabels=row_labels,
71
- cellLoc="center",
72
- colColours=["#7FA1C3" for _ in range(n_cols)],
73
- bbox=Bbox([[0, 0], [1, 1]]),
70
+ _, X_, E_, rho_, res_list = algorithm.run_mGST(
71
+ y_sampled,
72
+ attrs["J"],
73
+ attrs["seq_len_list"][-1],
74
+ attrs["num_gates"],
75
+ attrs["pdim"] ** 2,
76
+ attrs["rank"],
77
+ attrs["num_povm"],
78
+ attrs["batch_size"],
79
+ attrs["shots"],
80
+ method=attrs["opt_method"],
81
+ max_inits=attrs["max_inits"],
82
+ max_iter=0,
83
+ final_iter=attrs["max_iterations"][1],
84
+ threshold_multiplier=attrs["convergence_criteria"][0],
85
+ target_rel_prec=attrs["convergence_criteria"][1],
86
+ init=init,
87
+ verbose_level=0,
88
+ )
89
+ # Compute ideal least squares error that only includes shot noise and no model mismatch
90
+ delta = (1 - y_sampled.reshape(-1)) @ y_sampled.reshape(-1) / len(attrs["J"]) / attrs["num_povm"] / attrs["shots"]
91
+ # Account for model mismatch depending on the Kraus rank (heuristic factor)
92
+ delta *= attrs["convergence_criteria"][0] * np.max([(attrs["pdim"] ** 2 - attrs["rank"])/attrs["pdim"], 1])
93
+ opt_success = res_list[-1] < delta
94
+
95
+ X_opt, E_opt, rho_opt = reporting.gauge_opt(X_, E_, rho_, target_mdl, attrs["gauge_weights"])
96
+ df_g, df_o = reporting.report(
97
+ X_opt,
98
+ E_opt,
99
+ rho_opt,
100
+ attrs["J"],
101
+ y_sampled,
102
+ target_mdl,
103
+ attrs["gate_labels"][identifier],
74
104
  )
75
- table.set_fontsize(fontsize)
76
- table.set_figure(fig)
77
- return fig
105
+
106
+ X_opt_pp, E_opt_pp, rho_opt_pp = compatibility.std2pp(X_opt, E_opt, rho_opt)
107
+ return X_opt_pp, E_opt_pp, rho_opt_pp, df_g.values, df_o.values, opt_success
78
108
 
79
109
 
80
110
  def bootstrap_errors(
@@ -86,7 +116,7 @@ def bootstrap_errors(
86
116
  rho: ndarray,
87
117
  target_mdl: Model,
88
118
  identifier: str,
89
- parametric: bool = False,
119
+ parametric: bool = True,
90
120
  ) -> tuple[Any, Any, Any, Any, Any]:
91
121
  """Resamples circuit outcomes a number of times and computes GST estimates for each repetition
92
122
  All results are then returned in order to compute bootstrap-error bars for GST estimates.
@@ -99,40 +129,39 @@ def bootstrap_errors(
99
129
  ----------
100
130
  dataset: xarray.Dataset
101
131
  A dataset containing counts from the experiment and configurations
102
- qubit_layout: List[int]
103
- The list of qubits for the current GST experiment
104
132
  y: ndarray
105
133
  The circuit outcome probabilities as a num_povm x num_circuits array
106
- K : ndarray
134
+ K: ndarray
107
135
  Each subarray along the first axis contains a set of Kraus operators.
108
136
  The second axis enumerates Kraus operators for a gate specified by the first axis.
109
- X : 3D ndarray
137
+ X: 3D ndarray
110
138
  Array where reconstructed CPT superoperators in standard basis are stacked along the first axis.
111
- E : ndarray
139
+ E: ndarray
112
140
  Current POVM estimate
113
- rho : ndarray
141
+ rho: ndarray
114
142
  Current initial state estimate
115
- target_mdl : pygsti model object
143
+ target_mdl: pygsti model object
116
144
  The target gate set
117
- identifier : str
145
+ identifier: str
118
146
  The string identifier of the current benchmark
119
- parametric : bool
147
+ parametric: bool
120
148
  If set to True, parametric bootstrapping is used, else non-parametric bootstrapping. Default: True
121
149
 
122
150
  Returns
123
151
  -------
124
- X_array : ndarray
152
+ X_array: ndarray
125
153
  Array containing all estimated gate tensors of different bootstrapping repetitions along first axis
126
- E_array : ndarray
154
+ E_array: ndarray
127
155
  Array containing all estimated POVM tensors of different bootstrapping repetitions along first axis
128
- rho_array : ndarray
156
+ rho_array: ndarray
129
157
  Array containing all estimated initial states of different bootstrapping repetitions along first axis
130
- df_g_array : ndarray
158
+ df_g_array: ndarray
131
159
  Contains gate quality measures of bootstrapping repetitions
132
- df_o_array : ndarray
160
+ df_o_array: ndarray
133
161
  Contains SPAM and other quality measures of bootstrapping repetitions
134
162
 
135
163
  """
164
+ bootstrap_samples = dataset.attrs["bootstrap_samples"]
136
165
  if parametric:
137
166
  y = np.real(
138
167
  np.array(
@@ -142,86 +171,98 @@ def bootstrap_errors(
142
171
  ]
143
172
  )
144
173
  )
145
- X_array = np.zeros((dataset.attrs["bootstrap_samples"], *X.shape)).astype(complex)
146
- E_array = np.zeros((dataset.attrs["bootstrap_samples"], *E.shape)).astype(complex)
147
- rho_array = np.zeros((dataset.attrs["bootstrap_samples"], *rho.shape)).astype(complex)
174
+ X_list = []
175
+ E_list = []
176
+ rho_list = []
148
177
  df_g_list = []
149
178
  df_o_list = []
150
179
 
151
- for i in range(dataset.attrs["bootstrap_samples"]):
152
- y_sampled = additional_fns.sampled_measurements(y, dataset.attrs["shots"]).copy()
153
- _, X_, E_, rho_, _ = algorithm.run_mGST(
154
- y_sampled,
155
- dataset.attrs["J"],
156
- dataset.attrs["seq_len_list"][-1],
157
- dataset.attrs["num_gates"],
158
- dataset.attrs["pdim"] ** 2,
159
- dataset.attrs["rank"],
160
- dataset.attrs["num_povm"],
161
- dataset.attrs["batch_size"],
162
- dataset.attrs["shots"],
163
- method=dataset.attrs["opt_method"],
164
- max_inits=dataset.attrs["max_inits"],
165
- max_iter=0,
166
- final_iter=dataset.attrs["max_iterations"][1],
167
- threshold_multiplier=dataset.attrs["convergence_criteria"][0],
168
- target_rel_prec=dataset.attrs["convergence_criteria"][1],
169
- init=[K, E, rho],
170
- testing=False,
171
- )
180
+ num_physical_cores = psutil.cpu_count(logical=False)
181
+ num_workers = max(1, num_physical_cores - 1)
172
182
 
173
- X_opt, E_opt, rho_opt = reporting.gauge_opt(X_, E_, rho_, target_mdl, dataset.attrs[f"gauge_weights"])
174
- df_g, df_o = reporting.report(
175
- X_opt,
176
- E_opt,
177
- rho_opt,
178
- dataset.attrs["J"],
179
- y_sampled,
183
+ # Prepare arguments for each process
184
+ args_list = [
185
+ (
186
+ additional_fns.sampled_measurements(y, dataset.attrs["shots"]).copy(),
187
+ dataset.attrs,
188
+ [K, E, rho],
180
189
  target_mdl,
181
- dataset.attrs["gate_labels"][identifier],
190
+ identifier,
182
191
  )
183
- df_g_list.append(df_g.values)
184
- df_o_list.append(df_o.values)
192
+ for _ in range(bootstrap_samples)
193
+ ]
194
+
195
 + # process bootstrap samples sequentially if parallelizing over layouts is faster
196
+ if dataset.attrs["parallelization_path"] == "layout":
197
+ qcvv_logger.info(f"Bootstrapping of layout {identifier}")
198
+ all_results = []
199
+ with logging_redirect_tqdm(loggers=[qcvv_logger]):
200
+ for i in trange(len(args_list)):
201
+ arg = args_list[i]
202
+ all_results.append(process_bootstrap_samples(*arg))
203
+ else:
204
+ qcvv_logger.info(f"Parallel bootstrapping using {num_workers} out of {num_physical_cores} physical cores")
205
+ # Execute in parallel
206
+ with mp.Manager() as manager:
207
+ all_results = []
208
+
209
+ # Create a shared counter to track completed tasks
210
+ counter = manager.Value("i", 0)
211
+
212
+ # Create a progress bar that will be updated by all processes
213
+ with logging_redirect_tqdm(loggers=[qcvv_logger]):
214
+ pbar = tqdm(total=bootstrap_samples, desc="Bootstrap samples")
215
+
216
+ def update_progress(_=None):
217
+ counter.value += 1
218
+ pbar.update(1)
219
+
220
+ # Execute in parallel
221
+ with mp.Pool(num_workers) as pool:
222
+ results = [
223
+ pool.apply_async(process_bootstrap_samples, args=arg, callback=update_progress)
224
+ for arg in args_list
225
+ ]
226
+ all_results = [res.get() for res in results] # Wait for all results
185
227
 
186
- X_opt_pp, E_opt_pp, rho_opt_pp = compatibility.std2pp(X_opt, E_opt, rho_opt)
228
+ pbar.close()
187
229
 
188
- X_array[i] = X_opt_pp
189
- E_array[i] = E_opt_pp
190
- rho_array[i] = rho_opt_pp
230
+ for i, (X_opt_pp, E_opt_pp, rho_opt_pp, df_g_values, df_o_values, success) in enumerate(all_results):
231
+ X_list.append(X_opt_pp)
232
+ E_list.append(E_opt_pp)
233
+ rho_list.append(rho_opt_pp)
234
+ df_g_list.append(df_g_values)
235
+ df_o_list.append(df_o_values)
191
236
 
192
- return X_array, E_array, rho_array, np.array(df_g_list), np.array(df_o_list)
237
+ return np.array(X_list), np.array(E_list), np.array(rho_list), np.array(df_g_list), np.array(df_o_list)
193
238
 
194
239
 
195
240
  def generate_non_gate_results(
196
- dataset: xr.Dataset, qubit_layout: List[int], df_o: DataFrame
197
- ) -> tuple[DataFrame, Figure]:
241
+ df_o: DataFrame, bootstrap_results: Union[None, tuple[Any, Any, Any, Any, Any]] = None
242
+ ) -> DataFrame:
198
243
  """
199
244
  Creates error bars (if bootstrapping was used) and formats results for non-gate errors.
200
245
  The resulting tables are also turned into figures, so that they can be saved automatically.
201
246
 
202
247
  Args:
203
- dataset: xr.Dataset
204
- A dataset containing counts from the experiment and configurations
205
- qubit_layout: List[int]
206
- The list of qubits for the current GST experiment
207
248
  df_o: Pandas DataFrame
208
249
  A dataframe containing the non-gate quality metrics (SPAM errors and fit quality)
209
-
250
+ bootstrap_results: Union[None, tuple[Any, Any, Any, Any, Any]]
251
+ If provided, contains the results of the bootstrap analysis.
210
252
  Returns:
211
253
  df_o_final: Pandas DataFrame
212
254
  The final formated results
213
255
  """
214
- identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
215
- if dataset.attrs["bootstrap_samples"] > 0:
216
- _, _, _, _, df_o_array = dataset.attrs["results_layout_" + identifier]["bootstrap_data"]
256
+ if bootstrap_results is not None:
257
+ _, _, _, _, df_o_array = bootstrap_results
217
258
  df_o_array[df_o_array == -1] = np.nan
218
259
  percentiles_o_low, percentiles_o_high = np.nanpercentile(df_o_array, [2.5, 97.5], axis=0)
219
260
  df_o_final = DataFrame(
220
261
  {
221
- f"mean_total_variation_distance_estimate_data": reporting.number_to_str(
262
+ f"mean_tvd_estimate_data": reporting.number_to_str(
222
263
  df_o.values[0, 1].copy(), [percentiles_o_high[0, 1], percentiles_o_low[0, 1]], precision=5
223
264
  ),
224
- f"mean_total_variation_distance_target_data": reporting.number_to_str(
265
+ f"mean_tvd_target_data": reporting.number_to_str(
225
266
  df_o.values[0, 2].copy(), [percentiles_o_high[0, 2], percentiles_o_low[0, 2]], precision=5
226
267
  ),
227
268
  f"povm_diamond_distance": reporting.number_to_str(
@@ -236,26 +277,26 @@ def generate_non_gate_results(
236
277
  else:
237
278
  df_o_final = DataFrame(
238
279
  {
239
- f"mean_total_variation_distance_estimate_data": reporting.number_to_str(
240
- df_o.values[0, 1].copy(), precision=5
241
- ),
242
- f"mean_total_variation_distance_target_data": reporting.number_to_str(
243
- df_o.values[0, 2].copy(), precision=5
244
- ),
280
+ f"mean_tvd_estimate_data": reporting.number_to_str(df_o.values[0, 1].copy(), precision=5),
281
+ f"mean_tvd_target_data": reporting.number_to_str(df_o.values[0, 2].copy(), precision=5),
245
282
  f"povm_diamond_distance": reporting.number_to_str(df_o.values[0, 3].copy(), precision=5),
246
283
  f"state_trace_distance": reporting.number_to_str(df_o.values[0, 4].copy(), precision=5),
247
284
  },
248
285
  index=[""],
249
286
  )
250
- fig = dataframe_to_figure(df_o_final, [""]) # dataframe_to_figure(df_o_final, [""])
251
- return df_o_final, fig
287
+ return df_o_final
252
288
 
253
289
 
254
290
  def generate_unit_rank_gate_results(
255
- dataset: xr.Dataset, qubit_layout: List[int], df_g: DataFrame, X_opt: ndarray, K_target: ndarray
256
- ) -> Tuple[DataFrame, DataFrame, Figure, Figure]:
291
+ dataset: xr.Dataset,
292
+ qubit_layout: List[int],
293
+ df_g: DataFrame,
294
+ X_opt: ndarray,
295
+ K_target: ndarray,
296
+ bootstrap_results: Union[None, tuple[Any, Any, Any, Any, Any]] = None,
297
+ ) -> Tuple[DataFrame, DataFrame, dict]:
257
298
  """
258
- Produces all result tables for Kraus rank 1 estimates and turns them into figures.
299
+ Produces all result tables for Kraus rank 1 estimates
259
300
 
260
301
  This includes parameters of the Hamiltonian generators in the Pauli basis for all gates,
261
302
  as well as the usual performance metrics (Fidelities and Diamond distances). If bootstrapping
@@ -278,97 +319,47 @@ def generate_unit_rank_gate_results(
278
319
  The dataframe with properly formatted results of standard gate errors
279
320
  df_g_rotation Pandas DataFrame
280
321
  A dataframe containing Hamiltonian (rotation) parameters
281
- fig_g: Figure
282
- A table in Figure format of gate results (fidelities etc.)
283
- fig_rotation: Figure
284
- A table in Figure format of gate Hamiltonian parameters
285
-
322
+ hamiltonian_params: dict
323
+ A dictionary containing the Hamiltonian parameters for each gate in the Pauli basis.
324
+ The keys are gate labels and the values are dictionaries with the parameters.
286
325
 
287
326
  """
288
327
  identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
289
- pauli_labels = generate_basis_labels(dataset.attrs["pdim"], basis="Pauli")
290
- if dataset.attrs["bootstrap_samples"] > 0:
291
- X_array, E_array, rho_array, df_g_array, _ = dataset.attrs["results_layout_" + identifier]["bootstrap_data"]
328
+ if bootstrap_results is not None:
329
+ X_array, E_array, rho_array, df_g_array, _ = bootstrap_results
292
330
  df_g_array[df_g_array == -1] = np.nan
293
331
  percentiles_g_low, percentiles_g_high = np.nanpercentile(df_g_array, [2.5, 97.5], axis=0)
294
-
295
- df_g_final = DataFrame(
296
- {
297
- r"average_gate_fidelity": [
298
- reporting.number_to_str(
299
- df_g.values[i, 0], [percentiles_g_high[i, 0], percentiles_g_low[i, 0]], precision=5
300
- )
301
- for i in range(len(dataset.attrs["gate_labels"][identifier]))
302
- ],
303
- r"diamond_distance": [
304
- reporting.number_to_str(
305
- df_g.values[i, 1], [percentiles_g_high[i, 1], percentiles_g_low[i, 1]], precision=5
306
- )
307
- for i in range(dataset.attrs["num_gates"])
308
- ],
309
- }
332
+ df_g_rotation, hamiltonian_params = reporting.generate_rotation_param_results(
333
+ dataset, qubit_layout, X_opt, K_target, X_array, E_array, rho_array
310
334
  )
311
335
 
312
- U_opt = reporting.phase_opt(X_opt, K_target)
313
- pauli_coeffs = reporting.compute_sparsest_Pauli_Hamiltonian(U_opt)
314
-
315
- bootstrap_pauli_coeffs = np.zeros((len(X_array), dataset.attrs["num_gates"], dataset.attrs["pdim"] ** 2))
316
- for i, X_ in enumerate(X_array):
317
- X_std, _, _ = compatibility.pp2std(X_, E_array[i], rho_array[i])
318
- U_opt_ = reporting.phase_opt(X_std, K_target)
319
- pauli_coeffs_ = reporting.compute_sparsest_Pauli_Hamiltonian(U_opt_)
320
- bootstrap_pauli_coeffs[i, :, :] = pauli_coeffs_
321
- pauli_coeffs_low, pauli_coeffs_high = np.nanpercentile(bootstrap_pauli_coeffs, [2.5, 97.5], axis=0)
322
-
323
- df_g_rotation = DataFrame(
324
- np.array(
325
- [
326
- [
327
- reporting.number_to_str(
328
- pauli_coeffs[i, j], [pauli_coeffs_high[i, j], pauli_coeffs_low[i, j]], precision=5
329
- )
330
- for i in range(dataset.attrs["num_gates"])
331
- ]
332
- for j in range(dataset.attrs["pdim"] ** 2)
333
- ]
334
- ).T
335
- )
336
-
337
- df_g_rotation.columns = [f"h_%s" % label for label in pauli_labels]
338
- df_g_rotation.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)
339
-
340
336
  else:
341
- df_g_final = DataFrame(
342
- {
343
- "average_gate_fidelity": [
344
- reporting.number_to_str(df_g.values[i, 0], precision=5) for i in range(dataset.attrs["num_gates"])
345
- ],
346
- "diamond_distance": [
347
- reporting.number_to_str(df_g.values[i, 1], precision=5) for i in range(dataset.attrs["num_gates"])
348
- ],
349
- }
337
+ df_g_rotation, hamiltonian_params = reporting.generate_rotation_param_results(
338
+ dataset, qubit_layout, X_opt, K_target
350
339
  )
351
- U_opt = reporting.phase_opt(X_opt, K_target)
352
- pauli_coeffs = reporting.compute_sparsest_Pauli_Hamiltonian(U_opt)
353
340
 
354
- df_g_rotation = DataFrame(
355
- np.array(
356
- [
357
- [
358
- reporting.number_to_str(pauli_coeffs[i, j], precision=5)
359
- for i in range(dataset.attrs["num_gates"])
360
- ]
361
- for j in range(dataset.attrs["pdim"] ** 2)
362
- ]
363
- ).T
364
- )
365
- df_g_rotation.columns = [f"h_%s" % label for label in pauli_labels]
366
- df_g_rotation.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)
367
- df_g_final.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)
341
+ df_g_final = DataFrame(
342
+ {
343
+ r"average_gate_fidelity": [
344
+ reporting.number_to_str(
345
+ df_g.values[i, 0],
346
+ ([percentiles_g_high[i, 0], percentiles_g_low[i, 0]] if bootstrap_results is not None else None),
347
+ precision=5,
348
+ )
349
+ for i in range(len(dataset.attrs["gate_labels"][identifier]))
350
+ ],
351
+ r"diamond_distance": [
352
+ reporting.number_to_str(
353
+ df_g.values[i, 1],
354
+ ([percentiles_g_high[i, 1], percentiles_g_low[i, 1]] if bootstrap_results is not None else None),
355
+ precision=5,
356
+ )
357
+ for i in range(dataset.attrs["num_gates"])
358
+ ],
359
+ }
360
+ )
368
361
 
369
- fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"][identifier])
370
- fig_rotation = dataframe_to_figure(df_g_rotation, dataset.attrs["gate_labels"][identifier])
371
- return df_g_final, df_g_rotation, fig_g, fig_rotation
362
+ return df_g_final, df_g_rotation, hamiltonian_params
372
363
 
373
364
 
374
365
  def generate_gate_results(
@@ -378,10 +369,11 @@ def generate_gate_results(
378
369
  X_opt: ndarray,
379
370
  E_opt: ndarray,
380
371
  rho_opt: ndarray,
372
+ bootstrap_results: Union[None, tuple[Any, Any, Any, Any, Any]] = None,
381
373
  max_evals: int = 6,
382
- ) -> Tuple[DataFrame, DataFrame, Figure, Figure]:
374
+ ) -> Tuple[DataFrame, DataFrame]:
383
375
  """
384
- Produces all result tables for arbitrary Kraus rank estimates and turns them into figures.
376
+ Produces all result tables for arbitrary Kraus rank estimates
385
377
 
386
378
  Args:
387
379
  df_g: Pandas DataFrame
@@ -400,10 +392,6 @@ def generate_gate_results(
400
392
  The dataframe with properly formatted results of standard gate errors
401
393
  df_g_evals_final Pandas DataFrame
402
394
  A dataframe containing eigenvalues of the Choi matrices for all gates
403
- fig_g: Figure
404
- A table in Figure format of gate results (fidelities etc.)
405
- fig_choi: Figure
406
- A table in Figure format of eigenvalues of the Choi matrices of all gates
407
395
 
408
396
  """
409
397
  identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
@@ -411,22 +399,20 @@ def generate_gate_results(
411
399
  X_opt_pp, _, _ = compatibility.std2pp(X_opt, E_opt, rho_opt)
412
400
  df_g_evals = reporting.generate_Choi_EV_table(X_opt, n_evals, dataset.attrs["gate_labels"][identifier])
413
401
 
414
- if dataset.attrs["bootstrap_samples"] > 0:
415
- X_array, E_array, rho_array, df_g_array, _ = dataset.attrs["results_layout_" + identifier]["bootstrap_data"]
402
+ if bootstrap_results is not None:
403
+ X_array, E_array, rho_array, df_g_array, _ = bootstrap_results
404
+ successful_bootstraps = len(X_array)
416
405
  df_g_array[df_g_array == -1] = np.nan
417
406
  percentiles_g_low, percentiles_g_high = np.nanpercentile(df_g_array, [2.5, 97.5], axis=0)
418
- bootstrap_unitarities = np.array(
419
- [reporting.unitarities(X_array[i]) for i in range(dataset.attrs["bootstrap_samples"])]
420
- )
407
+ bootstrap_unitarities = np.array([reporting.unitarities(X_array[i]) for i in range(successful_bootstraps)])
421
408
  percentiles_u_low, percentiles_u_high = np.nanpercentile(bootstrap_unitarities, [2.5, 97.5], axis=0)
422
409
  X_array_std = [
423
- compatibility.pp2std(X_array[i], E_array[i], rho_array[i])[0]
424
- for i in range(dataset.attrs["bootstrap_samples"])
410
+ compatibility.pp2std(X_array[i], E_array[i], rho_array[i])[0] for i in range(successful_bootstraps)
425
411
  ]
426
412
  bootstrap_evals = np.array(
427
413
  [
428
414
  reporting.generate_Choi_EV_table(X_array_std[i], n_evals, dataset.attrs["gate_labels"][identifier])
429
- for i in range(dataset.attrs["bootstrap_samples"])
415
+ for i in range(successful_bootstraps)
430
416
  ]
431
417
  )
432
418
  percentiles_evals_low, percentiles_evals_high = np.nanpercentile(bootstrap_evals, [2.5, 97.5], axis=0)
@@ -498,59 +484,7 @@ def generate_gate_results(
498
484
  df_g_evals_final = DataFrame(eval_strs).T
499
485
  df_g_evals_final.rename(index=dataset.attrs["gate_labels"][identifier], inplace=True)
500
486
 
501
- fig_g = dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"][identifier])
502
- fig_choi = dataframe_to_figure(df_g_evals_final, dataset.attrs["gate_labels"][identifier])
503
- return df_g_final, df_g_evals_final, fig_g, fig_choi
504
-
505
-
506
- def generate_basis_labels(pdim: int, basis: Union[str, None] = None) -> List[str]:
507
- """Generate a list of labels for the Pauli basis or the standard basis
508
-
509
- Args:
510
- pdim: int
511
- Physical dimension
512
- basis: str
513
- Which basis the labels correspond to, currently default is standard basis and "Pauli" can be choose
514
- for Pauli basis labels like "II", "IX", "XX", ...
515
-
516
- Returns:
517
- labels: List[str]
518
- A list of all string combinations for the given dimension and basis
519
- """
520
- separator = ""
521
- if basis == "Pauli":
522
- pauli_labels_loc = ["I", "X", "Y", "Z"]
523
- pauli_labels_rep = [pauli_labels_loc for _ in range(int(np.log2(pdim)))]
524
- labels = [separator.join(map(str, x)) for x in product(*pauli_labels_rep)]
525
- else:
526
- std_labels_loc = ["0", "1"]
527
- std_labels_rep = [std_labels_loc for _ in range(int(np.log2(pdim)))]
528
- labels = [separator.join(map(str, x)) for x in product(*std_labels_rep)]
529
-
530
- return labels
531
-
532
-
533
- def result_str_to_floats(result_str: str, err: str) -> Tuple[float, float]:
534
- """Converts formated string results from mgst to float (value, uncertainty) pairs
535
-
536
- Args:
537
- result_str: str
538
- The value of a result parameter formated as str
539
- err: str
540
- The error interval of the parameters
541
-
542
- Returns:
543
- value: float
544
- The parameter value as floar
545
- uncertainty: float
546
- A single uncertainty value
547
- """
548
- if err:
549
- value = float(result_str.split("[")[0])
550
- rest = result_str.split("[")[1].split(",")
551
- uncertainty = float(rest[1][:-1]) - float(rest[0])
552
- return value, uncertainty
553
- return float(result_str), np.NaN
487
+ return df_g_final, df_g_evals_final
554
488
 
555
489
 
556
490
  def pandas_results_to_observations(
@@ -583,8 +517,8 @@ def pandas_results_to_observations(
583
517
  BenchmarkObservation(
584
518
  name=f"{name}_{gate_label}:crosstalk_components={qubits}",
585
519
  identifier=identifier,
586
- value=result_str_to_floats(df_g[name].iloc[idx], err)[0],
587
- uncertainty=result_str_to_floats(df_g[name].iloc[idx], err)[1],
520
+ value=reporting.result_str_to_floats(df_g[name].iloc[idx], err)[0],
521
+ uncertainty=reporting.result_str_to_floats(df_g[name].iloc[idx], err)[1],
588
522
  )
589
523
  for name in df_g.columns.tolist()
590
524
  ]
@@ -594,8 +528,8 @@ def pandas_results_to_observations(
594
528
  BenchmarkObservation(
595
529
  name=f"{name}",
596
530
  identifier=identifier,
597
- value=result_str_to_floats(df_o[name].iloc[0], err)[0],
598
- uncertainty=result_str_to_floats(df_o[name].iloc[0], err)[1],
531
+ value=reporting.result_str_to_floats(df_o[name].iloc[0], err)[0],
532
+ uncertainty=reporting.result_str_to_floats(df_o[name].iloc[0], err)[1],
599
533
  )
600
534
  for name in df_o.columns.tolist()
601
535
  ]
@@ -720,7 +654,6 @@ def run_mGST_wrapper(
720
654
  init_params = [K_init, E_target, rho_target]
721
655
  else:
722
656
  init_params = None
723
-
724
657
  K, X, E, rho, _ = algorithm.run_mGST(
725
658
  y,
726
659
  dataset.attrs["J"],
@@ -738,12 +671,199 @@ def run_mGST_wrapper(
738
671
  threshold_multiplier=dataset.attrs["convergence_criteria"][0],
739
672
  target_rel_prec=dataset.attrs["convergence_criteria"][1],
740
673
  init=init_params,
741
- testing=dataset.attrs["testing"],
674
+ verbose_level=dataset.attrs["verbose_level"],
742
675
  )
743
676
 
744
677
  return K, X, E, rho, K_target, X_target, E_target, rho_target
745
678
 
746
679
 
680
+ def process_layout(
681
+ args: Tuple[xr.Dataset, List[int], int]
682
+ ) -> Tuple[List[int], dict[str, Any], List[BenchmarkObservation], DataFrame, DataFrame, DataFrame]:
683
+ """Process a single qubit layout for Gate Set Tomography analysis.
684
+
685
+ This function performs the full GST workflow for a single qubit layout:
686
+ 1. Convert counts to mGST format
687
+ 2. Run mGST reconstruction
688
+ 3. Perform gauge optimization
689
+ 4. Generate reports and metrics
690
+ 5. Run bootstrap analysis if configured
691
+ 6. Format results into dataframes and observations
692
+
693
+ Args:
694
+ args: Tuple
695
+ containing: dataset: xr.Dataset, qubit_layout: List[int], pdim: int
696
+
697
+ Returns:
698
+ qubit_layout: List[int]
699
+ The input qubit layout being processed
700
+ results_dict: dict[str, Any]
701
+ Dictionary containing all raw and processed results
702
+ layout_observations: List[BenchmarkObservation]
703
+ List of benchmark observations for this layout
704
+ df_g_final: DataFrame
705
+ DataFrame containing gate metrics (fidelity, diamond distance, etc.)
706
+ df_o_final: DataFrame
707
+ DataFrame containing non-gate metrics (SPAM errors, fit quality)
708
+ df_g_evals: DataFrame
709
+ DataFrame containing Choi matrix eigenvalues (for rank > 1)
710
+ """
711
+ dataset, qubit_layout, pdim = args
712
+ identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
713
+
714
+ qcvv_logger.info(f"Running mGST analysis for layout {qubit_layout}")
715
+
716
+ # Computing circuit outcome probabilities from counts
717
+ y = dataset_counts_to_mgst_format(dataset, qubit_layout)
718
+
719
+ # Main GST reconstruction
720
+ start_timer = perf_counter()
721
+ K, X, E, rho, K_target, X_target, E_target, rho_target = run_mGST_wrapper(dataset, y)
722
+ main_gst_time = perf_counter() - start_timer
723
+
724
+ # Gauge optimization
725
+ start_timer = perf_counter()
726
+ target_mdl = compatibility.arrays_to_pygsti_model(X_target, E_target, rho_target, basis="std")
727
+ X_opt, E_opt, rho_opt = reporting.gauge_opt(X, E, rho, target_mdl, dataset.attrs["gauge_weights"])
728
+ gauge_optimization_time = perf_counter() - start_timer
729
+
730
+ # Quick report
731
+ df_g, _ = reporting.quick_report(
732
+ X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
733
+ )
734
+
735
+ # Gate set in the Pauli basis
736
+ X_opt_pp, _, _ = compatibility.std2pp(X_opt, E_opt, rho_opt)
737
+ X_target_pp, _, _ = compatibility.std2pp(X_target, E_target, rho_target)
738
+
739
+ # Prepare results dict
740
+ results_dict = {
741
+ "raw_Kraus_operators": K,
742
+ "raw_gates": X,
743
+ "raw_POVM": E.reshape((dataset.attrs["num_povm"], pdim, pdim)),
744
+ "raw_state": rho.reshape((pdim, pdim)),
745
+ "gauge_opt_gates": X_opt,
746
+ "gauge_opt_gates_Pauli_basis": X_opt_pp,
747
+ "gauge_opt_POVM": E_opt.reshape((dataset.attrs["num_povm"], pdim, pdim)),
748
+ "gauge_opt_state": rho_opt.reshape((pdim, pdim)),
749
+ "target_gates": X_target,
750
+ "target_gates_Pauli_basis": X_target_pp,
751
+ "target_POVM": E_target.reshape((dataset.attrs["num_povm"], pdim, pdim)),
752
+ "target_state": rho_target.reshape((pdim, pdim)),
753
+ "main_mGST_time": main_gst_time,
754
+ "gauge_optimization_time": gauge_optimization_time,
755
+ }
756
+
757
+ # Bootstrap
758
+ bootstrap_results = None
759
+ if dataset.attrs["bootstrap_samples"] > 0:
760
+ bootstrap_results = bootstrap_errors(dataset, y, K, X, E, rho, target_mdl, identifier, parametric=False)
761
+ results_dict.update({"bootstrap_data": bootstrap_results})
762
+
763
+ _, df_o_full = reporting.report(
764
+ X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
765
+ )
766
+ df_o_final = generate_non_gate_results(df_o_full, bootstrap_results)
767
+
768
+ # Result table generation and full report
769
+ if dataset.attrs["rank"] == 1:
770
+ df_g_final, _, hamiltonian_params = generate_unit_rank_gate_results(
771
+ dataset, qubit_layout, df_g, X_opt, K_target, bootstrap_results
772
+ )
773
+ results_dict.update({"hamiltonian_params": hamiltonian_params})
774
+ df_g_evals = pd.DataFrame()
775
+ else:
776
+ df_g_final, df_g_evals = generate_gate_results(
777
+ dataset, qubit_layout, df_g, X_opt, E_opt, rho_opt, bootstrap_results
778
+ )
779
+ results_dict.update({"choi_evals": df_g_evals.to_dict()})
780
+
781
+ layout_observations = pandas_results_to_observations(
782
+ dataset, df_g_final, df_o_final, BenchmarkObservationIdentifier(qubit_layout)
783
+ )
784
+
785
+ results_dict.update({"full_metrics": {"Gates": df_g_final.to_dict(), "Outcomes and SPAM": df_o_final.to_dict()}})
786
+ return qubit_layout, results_dict, layout_observations, df_g_final, df_o_final, df_g_evals
787
+
788
+
789
+ def process_plots(
790
+ dataset: xr.Dataset,
791
+ qubit_layout: List[int],
792
+ results_dict: dict[str, Any],
793
+ df_g_final: DataFrame,
794
+ df_o_final: DataFrame,
795
+ df_g_evals_final: DataFrame,
796
+ ) -> dict[str, Figure]:
797
+ """Process and generate all plots for a single qubit layout.
798
+
799
+ This function creates various visualization plots for gate set tomography results,
800
+ including gate metrics tables, process matrices, and SPAM (State Preparation And
801
+ Measurement) matrices in both real and imaginary parts.
802
+
803
+ Args:
804
+ dataset: xarray Dataset containing experimental data and configuration attributes
805
+ qubit_layout: List of qubit indices defining the current layout
806
+ results_dict: Dictionary containing gauge-optimized gates, POVM elements, and states
807
+ in both standard and Pauli basis
808
+ df_g_final: DataFrame containing gate metrics such as fidelity and diamond distance
809
+ df_o_final: DataFrame containing non-gate metrics such as SPAM errors
810
+ df_g_evals_final: DataFrame containing Choi matrix eigenvalues (can be empty)
811
+
812
+ Returns:
813
+ layout_plots: Dictionary mapping plot names to matplotlib Figure objects.
814
+ Keys follow the pattern "layout_{qubit_layout}_{plot_type}"
815
+ """
816
+ layout_plots = {}
817
+ # Process matrix plots
818
+ pdim = dataset.attrs["pdim"]
819
+ pauli_labels = figure_gen.generate_basis_labels(pdim, basis="Pauli")
820
+ std_labels = figure_gen.generate_basis_labels(pdim)
821
+
822
+ identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
823
+
824
+ fig_g = figure_gen.dataframe_to_figure(df_g_final, dataset.attrs["gate_labels"][identifier])
825
+ if not df_g_evals_final.empty:
826
+ fig_choi = figure_gen.dataframe_to_figure(df_g_evals_final, dataset.attrs["gate_labels"][identifier])
827
+ layout_plots[f"layout_{qubit_layout}_choi_eigenvalues"] = fig_choi
828
+ fig_o = figure_gen.dataframe_to_figure(df_o_final, [""])
829
+
830
+ layout_plots[f"layout_{qubit_layout}_gate_metrics"] = fig_g
831
+ layout_plots[f"layout_{qubit_layout}_other_metrics"] = fig_o
832
+ figures = figure_gen.generate_gate_err_pdf(
833
+ "",
834
+ results_dict["gauge_opt_gates_Pauli_basis"],
835
+ results_dict["target_gates_Pauli_basis"],
836
+ basis_labels=pauli_labels,
837
+ gate_labels=dataset.attrs["gate_labels"][identifier],
838
+ return_fig=True,
839
+ )
840
+ for i, figure in enumerate(figures):
841
+ layout_plots[f"layout_{qubit_layout}_process_matrix_{i}"] = figure
842
+
843
+ layout_plots[f"layout_{qubit_layout}_SPAM_matrices_real"] = figure_gen.generate_spam_err_std_pdf(
844
+ "",
845
+ results_dict["gauge_opt_POVM"].reshape((-1, pdim**2)).real,
846
+ results_dict["gauge_opt_state"].reshape(-1).real,
847
+ results_dict["target_POVM"].reshape((-1, pdim**2)).real,
848
+ results_dict["target_state"].reshape(-1).real,
849
+ basis_labels=std_labels,
850
+ title=f"Real part of state and measurement effects in the standard basis\n(red:<0; blue:>0)",
851
+ return_fig=True,
852
+ )
853
+ layout_plots[f"layout_{qubit_layout}_SPAM_matrices_imag"] = figure_gen.generate_spam_err_std_pdf(
854
+ "",
855
+ results_dict["gauge_opt_POVM"].reshape((-1, pdim**2)).imag,
856
+ results_dict["gauge_opt_state"].reshape(-1).imag,
857
+ results_dict["target_POVM"].reshape((-1, pdim**2)).imag,
858
+ results_dict["target_state"].reshape(-1).imag,
859
+ basis_labels=std_labels,
860
+ title=f"Imaginary part of state and measurement effects in the standard basis\n(red:<0; blue:>0)",
861
+ return_fig=True,
862
+ )
863
+ plt.close("all")
864
+ return layout_plots
865
+
866
+
747
867
  def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
748
868
  """Analysis function for compressive GST
749
869
 
@@ -757,118 +877,97 @@ def mgst_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
757
877
  dataset = run.dataset
758
878
  pdim = dataset.attrs["pdim"]
759
879
  plots = {}
760
- observations = []
761
- for i, qubit_layout in enumerate(dataset.attrs["qubit_layouts"]):
762
- identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
763
-
764
- # Computing circuit outcome probabilities from counts
765
- y = dataset_counts_to_mgst_format(dataset, qubit_layout)
766
-
767
- # Main GST reconstruction
768
- start_timer = perf_counter()
769
- K, X, E, rho, K_target, X_target, E_target, rho_target = run_mGST_wrapper(dataset, y)
770
- main_gst_time = perf_counter() - start_timer
880
+ # observations = []
771
881
 
772
- # Gauge optimization
773
- start_timer = perf_counter()
774
- target_mdl = compatibility.arrays_to_pygsti_model(X_target, E_target, rho_target, basis="std")
775
- X_opt, E_opt, rho_opt = reporting.gauge_opt(X, E, rho, target_mdl, dataset.attrs[f"gauge_weights"])
776
- gauge_optimization_time = perf_counter() - start_timer
882
+ # Use all but one physical core
883
+ num_physical_cores = psutil.cpu_count(logical=False)
884
+ num_workers = max(1, num_physical_cores - 1)
777
885
 
778
- # Quick report
779
- df_g, _ = reporting.quick_report(
780
- X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
781
- )
782
-
783
- # Gate set in the Pauli basis
784
- X_opt_pp, _, _ = compatibility.std2pp(X_opt, E_opt, rho_opt)
785
- X_target_pp, _, _ = compatibility.std2pp(X_target, E_target, rho_target)
786
-
787
- # Saving
788
- dataset.attrs["results_layout_" + identifier] = {
789
- "raw_Kraus_operators": K,
790
- "raw_gates": X,
791
- "raw_POVM": E.reshape((dataset.attrs["num_povm"], pdim, pdim)),
792
- "raw_state": rho.reshape((pdim, pdim)),
793
- "gauge_opt_gates": X_opt,
794
- "gauge_opt_gates_Pauli_basis": X_opt_pp,
795
- "gauge_opt_POVM": E_opt.reshape((dataset.attrs["num_povm"], pdim, pdim)),
796
- "gauge_opt_state": rho_opt.reshape((pdim, pdim)),
797
- "main_mGST_time": main_gst_time,
798
- "gauge_optimization_time": gauge_optimization_time,
799
- }
886
+ # Prepare arguments for each process
887
+ args_list = [(dataset, qubit_layout, pdim) for qubit_layout in dataset.attrs["qubit_layouts"]]
800
888
 
801
- ### Bootstrap
802
- if dataset.attrs["bootstrap_samples"] > 0:
803
- bootstrap_results = bootstrap_errors(dataset, y, K, X, E, rho, target_mdl, identifier)
804
- dataset.attrs["results_layout_" + identifier].update({"bootstrap_data": bootstrap_results})
805
-
806
- _, df_o_full = reporting.report(
807
- X_opt, E_opt, rho_opt, dataset.attrs["J"], y, target_mdl, dataset.attrs["gate_labels"][identifier]
808
- )
809
- df_o_final, fig_o = generate_non_gate_results(dataset, qubit_layout, df_o_full)
889
+ # Determine whether layouts or bootstraps should be processed in parallel
890
+ n_layouts = len(args_list)
891
+ n_bootstraps = dataset.attrs["bootstrap_samples"]
810
892
 
811
- ### Result table generation and full report
812
- if dataset.attrs["rank"] == 1:
813
- df_g_final, df_g_rotation, fig_g, fig_rotation = generate_unit_rank_gate_results(
814
- dataset, qubit_layout, df_g, X_opt, K_target
815
- )
816
- dataset.attrs["results_layout_" + identifier].update({"hamiltonian_parameters": df_g_rotation.to_dict()})
817
- plots[f"layout_{qubit_layout}_hamiltonian_parameters"] = fig_rotation
818
- else:
819
- df_g_final, df_g_evals, fig_g, fig_choi = generate_gate_results(
820
- dataset, qubit_layout, df_g, X_opt, E_opt, rho_opt
821
- )
822
- dataset.attrs["results_layout_" + identifier].update({"choi_evals": df_g_evals.to_dict()})
823
- plots[f"layout_{qubit_layout}_choi_eigenvalues"] = fig_choi
824
- plots[f"layout_{qubit_layout}_gate_metrics"] = fig_g
825
- plots[f"layout_{qubit_layout}_other_metrics"] = fig_o
826
-
827
- observations.extend(
828
- pandas_results_to_observations(
829
- dataset, df_g_final, df_o_final, BenchmarkObservationIdentifier(qubit_layout)
830
- )
831
- )
893
+ # Number of cycles needed for parallel processing in either case; factor 1/2 due to bootstrap runs converging faster than the original optimization
894
+ parallel_layout_cycles = np.ceil(n_layouts / num_physical_cores) * (1 + n_bootstraps / 2)
895
+ parallel_bootstrap_cycles = n_layouts * (1 + np.ceil(n_bootstraps / num_physical_cores) / 2)
896
+ dataset.attrs["parallelization_path"] = (
897
+ "layout" if parallel_layout_cycles < parallel_bootstrap_cycles else "bootstrap"
898
+ )
832
899
 
833
- dataset.attrs["results_layout_" + identifier].update(
834
- {"full_metrics": {"Gates": df_g_final.to_dict(), "Outcomes and SPAM": df_o_final.to_dict()}}
835
- )
900
+ # Process layouts sequentially if parallelizing the bootstrapping is faster
901
+ if dataset.attrs["parallelization_path"] == "bootstrap":
902
+ all_results = []
903
+ for args in args_list:
904
+ all_results.append(process_layout(args))
905
+ else:
906
+ qcvv_logger.info(f"Parallel layout processing using {num_workers} out of {num_physical_cores} physical cores")
907
+ # Execute in parallel
908
+ with mp.Manager() as manager:
909
+ all_results = []
910
+ # Create a shared counter to track completed tasks
911
+ counter = manager.Value("i", 0)
912
+ total_layouts = len(dataset.attrs["qubit_layouts"])
913
+
914
+ # Define a callback function to update progress
915
+ def update_progress(_=None):
916
+ counter.value += 1
917
+ qcvv_logger.info(f"Completed estimation for {counter.value}/{total_layouts} qubit layouts")
918
+
919
+ # Execute in parallel using apply_async with callback
920
+ with mp.Pool(num_workers) as pool:
921
+ async_results = [
922
+ pool.apply_async(process_layout, args=(arg,), callback=update_progress) for arg in args_list
923
+ ]
924
+ all_results = []
925
+ for i, res in enumerate(async_results):
926
+ try:
927
+ result = res.get()
928
+ all_results.append(result)
929
+ except Exception as e: # pylint: disable=broad-exception-caught
930
+ qcvv_logger.error(f"Error processing layout {i}: {str(e)}")
931
+ # Create an error placeholder with the same structure
932
+ error_result: Tuple[
933
+ List[int], dict[str, Any], List[BenchmarkObservation], DataFrame, DataFrame, DataFrame
934
+ ] = (
935
+ args_list[i][1], # qubit_layout
936
+ {"error": str(e)},
937
+ [],
938
+ pd.DataFrame(),
939
+ pd.DataFrame(),
940
+ pd.DataFrame(),
941
+ )
942
+ all_results.append(error_result)
836
943
 
837
- ### Process matrix plots
838
- pauli_labels = generate_basis_labels(pdim, basis="Pauli")
839
- std_labels = generate_basis_labels(pdim)
840
-
841
- figures = figure_gen.generate_gate_err_pdf(
842
- "",
843
- X_opt_pp,
844
- X_target_pp,
845
- basis_labels=pauli_labels,
846
- gate_labels=dataset.attrs["gate_labels"][identifier],
847
- return_fig=True,
848
- )
849
- for i, figure in enumerate(figures):
850
- plots[f"layout_{qubit_layout}_process_matrix_{i}"] = figure
851
-
852
- plots[f"layout_{qubit_layout}_SPAM_matrices_real"] = figure_gen.generate_spam_err_std_pdf(
853
- "",
854
- E_opt.real,
855
- rho_opt.real,
856
- E_target.real,
857
- rho_target.real,
858
- basis_labels=std_labels,
859
- title=f"Real part of state and measurement effects in the standard basis",
860
- return_fig=True,
861
- )
862
- plots[f"layout_{qubit_layout}_SPAM_matrices_imag"] = figure_gen.generate_spam_err_std_pdf(
863
- "",
864
- E_opt.imag,
865
- rho_opt.imag,
866
- E_target.imag,
867
- rho_target.imag,
868
- basis_labels=std_labels,
869
- title=f"Imaginary part of state and measurement effects in the standard basis",
870
- return_fig=True,
871
- )
872
- plt.close("all")
944
+ # Collect results
945
+ observations_list, df_g_list, df_o_list, df_g_evals_list = [], [], [], []
873
946
 
874
- return BenchmarkAnalysisResult(dataset=dataset, observations=observations, plots=plots)
947
+ for i, (qubit_layout, results_dict, layout_observations, df_g_final, df_o_final, df_g_evals_final) in enumerate(
948
+ all_results
949
+ ):
950
+ identifier = BenchmarkObservationIdentifier(qubit_layout).string_identifier
951
+ # Update dataset
952
+ dataset.attrs["results_layout_" + identifier] = results_dict
953
+ # Collect observations and dataframes
954
+ observations_list.extend(layout_observations)
955
+ df_g_list.append(df_g_final)
956
+ df_o_list.append(df_o_final)
957
+ df_g_evals_list.append(df_g_evals_final)
958
+
959
+ # Generate figures for this layout
960
+ N_layouts = len(dataset.attrs["qubit_layouts"])
961
+ qcvv_logger.info(f"Generating figures for layout {i+1}/{N_layouts}")
962
+ layout_plots = process_plots(dataset, qubit_layout, results_dict, df_g_final, df_o_final, df_g_evals_final)
963
+ plots.update(layout_plots)
964
+
965
+ # Generate additional figures for Hamiltonian parameters if rank is 1
966
+ if dataset.attrs["rank"] == 1:
967
+ qcvv_logger.info(f"Generating additional rank 1 figures for all layouts")
968
+ hamiltonian_plots = figure_gen.generate_hamiltonian_visualizations(dataset)
969
+ plots.update(hamiltonian_plots)
970
+ plt.close("all")
971
+ qcvv_logger.info("Analysis completed")
972
+
973
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observations_list, plots=plots)