iqm_benchmarks-1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of iqm-benchmarks might be problematic.

Files changed (42)
  1. iqm/benchmarks/__init__.py +31 -0
  2. iqm/benchmarks/benchmark.py +109 -0
  3. iqm/benchmarks/benchmark_definition.py +264 -0
  4. iqm/benchmarks/benchmark_experiment.py +163 -0
  5. iqm/benchmarks/compressive_gst/__init__.py +20 -0
  6. iqm/benchmarks/compressive_gst/compressive_gst.py +1029 -0
  7. iqm/benchmarks/entanglement/__init__.py +18 -0
  8. iqm/benchmarks/entanglement/ghz.py +802 -0
  9. iqm/benchmarks/logging_config.py +29 -0
  10. iqm/benchmarks/optimization/__init__.py +18 -0
  11. iqm/benchmarks/optimization/qscore.py +719 -0
  12. iqm/benchmarks/quantum_volume/__init__.py +21 -0
  13. iqm/benchmarks/quantum_volume/clops.py +726 -0
  14. iqm/benchmarks/quantum_volume/quantum_volume.py +854 -0
  15. iqm/benchmarks/randomized_benchmarking/__init__.py +18 -0
  16. iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
  17. iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
  18. iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +19 -0
  19. iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +386 -0
  20. iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +19 -0
  21. iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +555 -0
  22. iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +19 -0
  23. iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +810 -0
  24. iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +86 -0
  25. iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +892 -0
  26. iqm/benchmarks/readout_mitigation.py +290 -0
  27. iqm/benchmarks/utils.py +521 -0
  28. iqm_benchmarks-1.3.dist-info/LICENSE +205 -0
  29. iqm_benchmarks-1.3.dist-info/METADATA +190 -0
  30. iqm_benchmarks-1.3.dist-info/RECORD +42 -0
  31. iqm_benchmarks-1.3.dist-info/WHEEL +5 -0
  32. iqm_benchmarks-1.3.dist-info/top_level.txt +2 -0
  33. mGST/LICENSE +21 -0
  34. mGST/README.md +54 -0
  35. mGST/additional_fns.py +962 -0
  36. mGST/algorithm.py +733 -0
  37. mGST/compatibility.py +238 -0
  38. mGST/low_level_jit.py +694 -0
  39. mGST/optimization.py +349 -0
  40. mGST/qiskit_interface.py +282 -0
  41. mGST/reporting/figure_gen.py +334 -0
  42. mGST/reporting/reporting.py +710 -0
mGST/reporting/reporting.py
@@ -0,0 +1,710 @@
+ """
+ Generation of error measures and result tables
+ """
+
+ from argparse import Namespace
+ import csv
+ import os
+
+ import numpy as np
+ import numpy.linalg as la
+ import pandas as pd
+ from pygsti.algorithms import gaugeopt_to_target
+ from pygsti.baseobjs import Basis
+ from pygsti.models import gaugegroup
+ from pygsti.report.reportables import entanglement_fidelity
+ from pygsti.tools import change_basis
+ from pygsti.tools.optools import compute_povm_map
+ from qiskit.quantum_info import SuperOp
+ from qiskit.quantum_info.operators.measures import diamond_norm
+ from scipy.linalg import logm
+ from scipy.optimize import linear_sum_assignment, minimize
+
+ from mGST import additional_fns, algorithm, compatibility, low_level_jit
+
+
+ def min_spectral_distance(X1, X2):
+     """Computes the average absolute distance between the eigenvalues of two matrices
+     The matrices are first diagonalized, then the eigenvalues are matched such that the average
+     distance of matched eigenvalue pairs is minimal.
+     The big advantage of this distance metric is that it is gauge invariant and it can thus be used
+     to check whether the reconstructed gates are similar to the target gates before any gauge optimization.
+
+     Parameters
+     ----------
+     X1: numpy array
+         The first matrix
+     X2: numpy array
+         The second matrix
+
+     Returns
+     -------
+     dist: float
+         The minimal distance
+     """
+     r = X1.shape[0]
+     eigs = la.eig(X1)[0]
+     eigs_t = la.eig(X2)[0]
+     cost_matrix = np.array([[np.abs(eigs[i] - eigs_t[j]) for i in range(r)] for j in range(r)])
+     row_ind, col_ind = linear_sum_assignment(cost_matrix)
+     normalization = np.abs(eigs).sum()
+     dist = cost_matrix[row_ind, col_ind].sum() / normalization
+     return dist
+
+
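Illustrative usage (editorial sketch, not part of the diff): the gauge invariance claimed above can be checked by comparing a toy superoperator against a gauge-transformed copy of itself, with min_spectral_distance in scope.

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
X1 = np.diag([1.0, 0.99, 0.98, 0.97])       # toy single-qubit superoperator
T = la.qr(rng.normal(size=(4, 4)))[0]       # random gauge transformation
print(min_spectral_distance(X1, T @ X1 @ la.inv(T)))  # ~0, since the spectrum is gauge invariant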
+ def MVE_data(X, E, rho, J, y):
+     """Mean variation error between measured outcomes and predicted outcomes from a gate set
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+     y : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+
+     Returns
+     -------
+     dist : float
+         Mean variation error
+     max_dist : float
+         Maximal variation error
+
+     Notes:
+     For each sequence the total variation error of the two probability distributions
+     over the POVM elements is computed. Afterwards the mean over these total
+     variation errors is returned.
+     """
+     m = y.shape[1]
+     n_povm = y.shape[0]
+     dist: float = 0
+     max_dist: float = 0
+     curr: float = 0
+     for i in range(m):
+         j = J[i]
+         C = low_level_jit.contract(X, j)
+         curr = 0
+         for k in range(n_povm):
+             y_model = E[k].conj() @ C @ rho
+             curr += np.abs(y_model - y[k, i])
+         curr = curr / 2
+         dist += curr
+         max_dist = max(max_dist, curr)
+     return dist / m, max_dist
+
+
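For orientation, the array shapes assumed here (and throughout the module) can be read off from the code above; the summary below is inferred from usage, not from separate documentation.

# Inferred shape conventions (pdim = Hilbert space dimension, r = pdim**2, d gates, m sequences):
#   X:   (d, r, r)              stacked superoperators
#   E:   (n_povm, r)            vectorized POVM effects
#   rho: (r,)                   vectorized initial state
#   J:   (m, sequence_length)   gate-index sequences
#   y:   (n_povm, m)            measured outcome probabilities, one column per sequence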
+ def gauge_opt(X, E, rho, target_mdl, weights):
+     """Performs pyGSTi gauge optimization to the target model
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     target_mdl : pygsti model object
+         A model containing the target gate set
+     weights : dict[str: float]
+         A dictionary with keys being gate labels or "spam" and corresponding values being the
+         weight of each gate in the gauge optimization.
+         Example for uniform weights: dict({"G%i"%i:1 for i in range(d)}, **{"spam":1})
+
+     Returns
+     -------
+     X_opt, E_opt, rho_opt: Numpy arrays
+         The gauge optimized gates and SPAM arrays
+     """
+     mdl = compatibility.arrays_to_pygsti_model(X, E, rho, basis="std")
+     X_t, E_t, rho_t = compatibility.pygsti_model_to_arrays(target_mdl, basis="std")
+     target_mdl = compatibility.arrays_to_pygsti_model(X_t, E_t, rho_t, basis="std")  # For consistent gate labels
+
+     gauge_optimized_mdl = gaugeopt_to_target(
+         mdl,
+         target_mdl,
+         gauge_group=gaugegroup.UnitaryGaugeGroup(target_mdl.state_space, basis="pp"),
+         item_weights=weights,
+     )
+     return compatibility.pygsti_model_to_arrays(gauge_optimized_mdl, basis="std")
+
+
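A hedged usage sketch, assuming X, E, rho come from a prior mGST run and target_mdl is a pygsti model of the ideal gates with labels G0, G1, ...:

d = X.shape[0]
weights = dict({f"G{i}": 1 for i in range(d)}, **{"spam": 1})  # uniform weights, as in the docstring
X_opt, E_opt, rho_opt = gauge_opt(X, E, rho, target_mdl, weights)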
+ def report(X, E, rho, J, y, target_mdl, gate_labels):
+     """Generation of pandas dataframes with gate and SPAM quality measures
+     The results can be converted to .tex tables or other formats to be used for GST reports
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+     y : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+     target_mdl : pygsti model object
+         A model containing the target gate set
+     gate_labels : dict[int: str]
+         A dictionary of names for the gates in X
+
+     Returns
+     -------
+     df_g : Pandas DataFrame
+         DataFrame of gate quality measures
+     df_o : Pandas DataFrame
+         DataFrame of all other quality/error measures
+     """
+     pdim = int(np.sqrt(rho.shape[0]))
+     X_t, E_t, rho_t = compatibility.pygsti_model_to_arrays(target_mdl, basis="std")
+     target_mdl = compatibility.arrays_to_pygsti_model(X_t, E_t, rho_t, basis="std")  # For consistent gate labels
+
+     gauge_optimized_mdl = compatibility.arrays_to_pygsti_model(X, E, rho, basis="std")
+     E_map = compute_povm_map(gauge_optimized_mdl, "Mdefault")
+     E_map_t = compute_povm_map(target_mdl, "Mdefault")
+
+     final_objf = low_level_jit.objf(X, E, rho, J, y)
+     MVE = MVE_data(X, E, rho, J, y)[0]
+     MVE_target = MVE_data(X_t, E_t, rho_t, J, y)[0]
+
+     povm_dd = diamond_norm(SuperOp(E_map) - SuperOp(E_map_t)) / 2
+     rho_td = la.norm(rho.reshape((pdim, pdim)) - rho_t.reshape((pdim, pdim)), ord="nuc") / 2
+     F_avg = compatibility.average_gate_fidelities(gauge_optimized_mdl, target_mdl, pdim, basis_string="pp")
+     DD = [diamond_norm(SuperOp(X[i]) - SuperOp(X_t[i])) / 2 for i in range(len(X))]
+     min_spectral_dists = [min_spectral_distance(X[i], X_t[i]) for i in range(X.shape[0])]
+
+     df_g = pd.DataFrame({"F_avg": F_avg, "Diamond distances": DD, "Min. Spectral distances": min_spectral_dists})
+     df_o = pd.DataFrame(
+         {
+             "Final cost function value": final_objf,
+             "Mean total variation dist. to data": MVE,
+             "Mean total variation dist. target to data": MVE_target,
+             "POVM - Meas. map diamond dist.": povm_dd,
+             "State - Trace dist.": rho_td,
+         },
+         index=[0],
+     )
+     df_g.rename(index=gate_labels, inplace=True)
+     df_o.rename(index={0: ""}, inplace=True)
+
+     return df_g, df_o
+
+
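The docstring mentions converting the results to .tex tables; one possible export path is the pandas Styler API, mirroring the export used in generate_Choi_EV_table further down. A sketch, assuming X_opt, E_opt, rho_opt, J, y, target_mdl and gate_labels are already in scope:

df_g, df_o = report(X_opt, E_opt, rho_opt, J, y, target_mdl, gate_labels)
df_g.style.to_latex("gate_quality.tex", position_float="centering", hrules=True)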
+ def quick_report(X, E, rho, J, y, target_mdl, gate_labels=None):
+     """Generation of pandas dataframes with gate and SPAM quality measures
+     The quick report is intended to check on a GST estimate with fast-to-compute measures
+     (no gate diamond distances) to get a first picture and check whether mGST and the gauge optimization
+     produce meaningful results.
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+     y : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+     target_mdl : pygsti model object
+         A model containing the target gate set
+     gate_labels : dict[int: str]
+         A dictionary of names for the gates in X
+
+     Returns
+     -------
+     df_g : Pandas DataFrame
+         DataFrame of gate quality measures
+     df_o : Pandas DataFrame
+         DataFrame of all other quality/error measures
+     """
+     pdim = int(np.sqrt(rho.shape[0]))
+     d = X.shape[0]
+     if not gate_labels:
+         gate_labels = {i: f"Gate {i}" for i in range(d)}
+
+     X_t, E_t, rho_t = compatibility.pygsti_model_to_arrays(target_mdl, basis="std")
+     target_mdl = compatibility.arrays_to_pygsti_model(X_t, E_t, rho_t, basis="std")  # For consistent gate labels
+
+     gauge_optimized_mdl = compatibility.arrays_to_pygsti_model(X, E, rho, basis="std")
+
+     final_objf = low_level_jit.objf(X, E, rho, J, y)
+     MVE = MVE_data(X, E, rho, J, y)[0]
+     MVE_target = MVE_data(X_t, E_t, rho_t, J, y)[0]
+
+     E_map = compute_povm_map(gauge_optimized_mdl, "Mdefault")
+     E_map_t = compute_povm_map(target_mdl, "Mdefault")
+     povm_dd = diamond_norm(SuperOp(E_map) - SuperOp(E_map_t)) / 2
+
+     rho_td = la.norm(rho.reshape((pdim, pdim)) - rho_t.reshape((pdim, pdim)), ord="nuc") / 2
+     F_avg = compatibility.average_gate_fidelities(gauge_optimized_mdl, target_mdl, pdim, basis_string="pp")
+     min_spectral_dists = [min_spectral_distance(X[i], X_t[i]) for i in range(X.shape[0])]
+
+     df_g = pd.DataFrame({"F_avg": F_avg, "Min. Spectral distances": min_spectral_dists})
+     df_o = pd.DataFrame(
+         {
+             "Final cost function": final_objf,
+             "Mean TVD estimate-data": MVE,
+             "Mean TVD target-data": MVE_target,
+             "SPAM error": rho_td + povm_dd,
+         },
+         index=[0],
+     )
+     df_g.rename(index=gate_labels, inplace=True)
+     df_o.rename(index={0: ""}, inplace=True)
+
+     return df_g, df_o
+
+
+ def compute_angles_axes(U_set, alternative_phase=False):
+     """Takes the matrix logarithm of the given unitaries and returns the Hamiltonian parameters
+     The parametrization is U = exp(-i pi H/2), i.e. H = i log(U)*2/pi
+
+     Parameters
+     ----------
+     U_set: numpy array
+         An array of unitary matrices stacked along the first axis
+     alternative_phase: bool
+         Whether an attempt should be made to report more intuitive rotations,
+         for example a rotation of 3 pi/2 around the -X axis would be turned into
+         a pi/2 rotation around the X-axis.
+
+     Returns
+     -------
+     angles: list[float]
+         The rotation angle on the Bloch sphere for all gates
+     axes : list[numpy array]
+         The normalized rotation axes in the Pauli basis for all gates
+     pauli_coeffs : list[numpy array]
+         The full list of Pauli basis coefficients of the Hamiltonian for all gates
+
+     Notes: sqrt(pdim) factor is due to Pauli basis normalization
+     """
+     d = U_set.shape[0]
+     pdim = U_set.shape[1]
+     angles = []
+     axes = []
+     pp_vecs = []
+     for i in range(d):
+         H = 1j * logm(U_set[i])
+         pp_vec = change_basis(H.reshape(-1), "std", "pp")
+         original_phase = la.norm(pp_vec[1:]) * 2 / np.sqrt(pdim)
+         if alternative_phase and (-np.min(pp_vec) > np.max(pp_vec)) and original_phase > np.pi:
+             alt_phase = (-original_phase + 2 * np.pi) % (2 * np.pi)
+             pp_vec = -pp_vec
+         else:
+             alt_phase = original_phase
+         angles.append(alt_phase / np.pi)
+         axes.append(pp_vec[1:] / la.norm(pp_vec[1:]))
+         pp_vecs.append(pp_vec)
+     pauli_coeffs = np.array(pp_vecs) / np.sqrt(pdim) / np.pi * 2
+     return angles, axes, pauli_coeffs
+
+
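As a concrete check of the parametrization U = exp(-i pi H/2): a pi/2 rotation about X corresponds to H = X/2. An illustrative sketch, with compute_angles_axes in scope:

import numpy as np
from scipy.linalg import expm

sigma_x = np.array([[0, 1], [1, 0]])
U_set = np.array([expm(-1j * np.pi / 4 * sigma_x)])  # pi/2 rotation about X
angles, axes, pauli_coeffs = compute_angles_axes(U_set)
# angles[0] ~ 0.5 (in units of pi), axes[0] ~ [1, 0, 0] in the (X, Y, Z) ordering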
+ def compute_sparsest_Pauli_Hamiltonian(U_set):
+     """Takes the matrix logarithms of the given unitaries and returns the sparsest Hamiltonian parameters in Pauli basis
+     The parametrization is U = exp(-i pi H/2), i.e. H = i log(U)*2/pi.
+     Different branches of the matrix logarithm lead to different Hamiltonians. This function optimizes over
+     combinations of adding 2*pi to different eigenvalues, in order to arrive at the branch with the Hamiltonian
+     whose Pauli basis representation is the sparsest.
+
+     Parameters
+     ----------
+     U_set : numpy array
+         An array of unitary matrices stacked along the first axis
+
+     Returns
+     -------
+     pauli_coeffs : list[numpy array]
+         The full list of Pauli basis coefficients of the Hamiltonian for all gates
+
+     Notes: sqrt(pdim) factor is due to Pauli basis normalization
+     """
+     pdim = U_set.shape[1]
+     pp_vecs = []
+
+     for U in U_set:
+         evals, evecs = np.linalg.eig(U)
+         Pauli_norms = []
+         for i in range(2**pdim):
+             bits = low_level_jit.local_basis(i, 2, pdim)
+             evals_new = 1j * np.log(evals) + 2 * np.pi * bits
+             H_new = evecs @ np.diag(evals_new) @ evecs.T.conj()
+             pp_vec = change_basis(H_new.reshape(-1), "std", "pp")
+             Pauli_norms.append(np.linalg.norm(pp_vec, ord=1))
+         opt_bits = low_level_jit.local_basis(np.argsort(Pauli_norms)[0], 2, pdim)
+         evals_opt = 1j * np.log(evals) + 2 * np.pi * opt_bits
+         H_opt = evecs @ np.diag(evals_opt) @ evecs.T.conj()
+         pp_vecs.append(change_basis(H_opt.reshape(-1), "std", "pp"))
+     pauli_coeffs = np.array(pp_vecs) / np.sqrt(pdim) / np.pi * 2
+     return pauli_coeffs
+
+
+ def phase_err(angle, U, U_t):
+     """Computes the norm between two input unitaries after a global phase is added to one of them
+
+     Parameters
+     ----------
+     angle : float
+         The global phase angle (in rad)
+     U : numpy array
+         The first unitary matrix
+     U_t : numpy array
+         The second unitary matrix (typically a target gate)
+
+     Returns
+     -------
+     norm : float
+         The norm of the difference
+     """
+     return la.norm(np.exp(1j * angle) * U - U_t)
+
+
+ def phase_opt(X, K_t):
+     """Returns the rK = 1 gate set with global phases fitted to match the target gate set
+
+     Parameters
+     ----------
+     X: 3D numpy array
+         Array where CPT superoperators are stacked along the first axis.
+         These should correspond to rK = 1 gates for the outputs to be meaningful.
+     K_t: 4D numpy array
+         Array of target gate Kraus operators
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+
+     Returns
+     -------
+     K_opt: 3D numpy array
+         Array of Kraus operators (one per gate) with matched global phases
+     """
+     d = X.shape[0]
+     r = X.shape[1]
+     pdim = int(np.sqrt(r))
+     K = additional_fns.Kraus_rep(X, d, pdim, 1).reshape((d, pdim, pdim))
+     K_t = K_t.reshape(d, pdim, pdim)
+     K_opt = np.zeros(K.shape).astype(complex)
+     for i in range(d):
+         angle_opt = minimize(phase_err, 1e-9, args=(K[i], K_t[i]), method="COBYLA").x
+         K_opt[i] = K[i] * np.exp(1j * angle_opt)
+     return K_opt
+
+
+ def eff_depol_params_agf(X_opt_pp):
+     """Computes the average gate fidelities to the completely depolarizing channel
+
+     Parameters
+     ----------
+     X_opt_pp: 3D numpy array
+         Array where CPT superoperators in Pauli basis are stacked along the first axis.
+
+     Returns
+     -------
+     ent_fids : list[float]
+         List of average gate fidelities wrt. the depol. channel corresponding to the gates in X_opt_pp.
+     """
+     r = X_opt_pp.shape[1]
+     pdim = np.sqrt(r)
+     ent_fids = []
+     basis = Basis.cast("pp", r)
+     K_depol = additional_fns.depol(int(np.sqrt(r)), 1)
+     X_depol = np.einsum("jkl,jnm -> knlm", K_depol, K_depol.conj()).reshape(r, r)
+     for i in range(X_opt_pp.shape[0]):
+         ent_fids.append(entanglement_fidelity(X_opt_pp[i], change_basis(X_depol, "std", "pp"), basis))
+     return (pdim * np.array(ent_fids) + 1) / (pdim + 1)
+
+
+ def unitarities(X_opt_pp):
+     """Computes the unitarities of all gates in the gate set
+
+     Parameters
+     ----------
+     X_opt_pp : 3D numpy array
+         Array where CPT superoperators in Pauli basis are stacked along the first axis.
+
+     Returns
+     -------
+     unitarities : list[float]
+         List of unitarities for the gates in X_opt_pp.
+     """
+     # Definition: Proposition 1 of https://arxiv.org/pdf/1503.07865.pdf
+     pdim = int(np.sqrt(X_opt_pp.shape[1]))
+     E_u = X_opt_pp[:, 1:, 1:]  # unital submatrices
+     unitarity_list = np.real(np.einsum("ijk, ijk -> i", E_u.conj(), E_u) / (pdim**2 - 1))
+     return unitarity_list
+
+
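A quick sanity check, using the fact that in the normalized Pauli basis a single-qubit depolarizing channel with parameter p is diag(1, 1-p, 1-p, 1-p) (illustrative, not part of the package):

import numpy as np

p = 0.1
X_id = np.eye(4)                             # unitary (identity) channel -> unitarity 1
X_depol = np.diag([1, 1 - p, 1 - p, 1 - p])  # depolarizing channel -> unitarity (1 - p)**2
print(unitarities(np.array([X_id, X_depol])))  # ~ [1.0, 0.81]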
+ # Spectrum of the Choi matrix
+ def generate_Choi_EV_table(X_opt, n_evals, gate_labels, filename=None):
+     """Outputs a .tex document containing a table with the largest eigenvalues of the Choi matrix for each gate
+
+     Parameters
+     ----------
+     X_opt : 3D numpy array
+         Array where CPT superoperators in standard basis are stacked along the first axis.
+     n_evals : int
+         Number of eigenvalues to be returned
+     gate_labels : dict[int: str]
+         The names of gates in the gate set
+     filename : str
+         The file name of the output .tex file
+
+     Returns
+     -------
+     df_g_evals : Pandas DataFrame
+         DataFrame of the normalized Choi matrix eigenvalues, one row per gate
+     """
+     d, r, _ = X_opt.shape
+     pdim = int(np.sqrt(r))
+     Choi_evals_result = np.zeros((d, r))
+     X_choi = X_opt.reshape(d, pdim, pdim, pdim, pdim)
+     X_choi = np.einsum("ijklm->iljmk", X_choi).reshape((d, pdim**2, pdim**2))
+     for j in range(d):
+         Choi_evals_result[j, :] = np.sort(np.abs(la.eig(X_choi[j])[0]))[::-1]
+     Choi_evals_normalized = np.einsum("ij,i -> ij", Choi_evals_result, 1 / la.norm(Choi_evals_result, axis=1, ord=1))
+
+     df_g_evals = pd.DataFrame(Choi_evals_normalized)
+     df_g_evals.rename(index=gate_labels, inplace=True)
+     if filename:
+         df_g_evals.style.to_latex(
+             filename + ".tex",
+             column_format="c|*{%i}{c}" % n_evals,
+             position_float="centering",
+             hrules=True,
+             caption="Eigenvalues of the Choi state",
+             position="h!",
+         )
+     return df_g_evals
+
+
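With filename=None the function simply returns the DataFrame, which makes a small sanity check easy: a unitary channel has a rank-one Choi matrix, so the identity channel should give normalized eigenvalues (1, 0, 0, 0). Illustrative sketch:

import numpy as np

df = generate_Choi_EV_table(np.eye(4)[None, :, :], 4, {0: "Idle"})
# df.loc["Idle"] ~ [1, 0, 0, 0]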
+ def dephasing_dist(prob_vec, X_pp):
+     """Returns the distance between a given channel and a local dephasing channel with given probabilities
+
+     Parameters
+     ----------
+     prob_vec : list[float]
+         A list of dephasing probabilities
+     X_pp : numpy array
+         A CPT superoperator in Pauli basis
+
+     Returns
+     -------
+     norm : float
+         The norm of the channel difference
+
+     """
+     X_deph = additional_fns.local_dephasing_pp(prob_vec)
+     return la.norm(X_pp - X_deph)
+
+
+ def dephasing_probabilities_2q(X_opt_pp, X_ideal_pp):
+     """Determines the local dephasing channel parameters which best describe the noise model
+     Works for two qubit gates only
+
+     Parameters
+     ----------
+     X_opt_pp : 3D numpy array
+         Array where reconstructed CPT superoperators in Pauli basis are stacked along the first axis.
+     X_ideal_pp : 3D numpy array
+         Array where target gate CPT superoperators in Pauli basis are stacked along the first axis.
+
+     Returns
+     -------
+     dephasing_probs : list[float]
+         The two best fit dephasing probabilities
+     """
+     dephasing_probs = []
+     for i in range(X_opt_pp.shape[0]):
+         dephasing_probs.append(minimize(dephasing_dist, [0.1, 0.1], args=X_opt_pp[i] @ la.inv(X_ideal_pp[i])).x)
+     return dephasing_probs
+
+
+ def bootstrap_errors(K, X, E, rho, mGST_args, bootstrap_samples, weights, gate_labels, target_mdl, parametric=False):
+     """Resamples circuit outcomes a number of times and computes GST estimates for each repetition
+     All results are then returned in order to compute bootstrap error bars for GST estimates.
+     Parametric bootstrapping uses the estimated gate set to create a newly sampled data set.
+     Non-parametric bootstrapping uses the initial dataset and resamples according to the
+     corresp. outcome probabilities.
+     Each bootstrap run is initialized with the estimated gate set in order to save processing time.
+
+     Parameters
+     ----------
+     K : numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+     X : 3D numpy array
+         Array where reconstructed CPT superoperators in standard basis are stacked along the first axis.
+     E : numpy array
+         Current POVM estimate
+     rho : numpy array
+         Current initial state estimate
+     mGST_args : dict[str: misc]
+         Arguments with which the run_mGST function was called
+     bootstrap_samples : int
+         Number of bootstrapping repetitions
+     weights : dict[str: float]
+         Gate weights used for gauge optimization
+     gate_labels : dict[int: str]
+         The names of gates in the gate set
+     target_mdl : pygsti model object
+         The target gate set
+     parametric : bool
+         If set to True, parametric bootstrapping is used, else non-parametric bootstrapping. Default: False
+
+     Returns
+     -------
+     X_array : numpy array
+         Array containing all estimated gate tensors of different bootstrapping repetitions along the first axis
+     E_array : numpy array
+         Array containing all estimated POVM tensors of different bootstrapping repetitions along the first axis
+     rho_array : numpy array
+         Array containing all estimated initial states of different bootstrapping repetitions along the first axis
+     df_g_array : numpy array
+         Contains gate quality measures of bootstrapping repetitions
+     df_o_array : numpy array
+         Contains SPAM and other quality measures of bootstrapping repetitions
+
+     """
+     ns = Namespace(**mGST_args)
+     if parametric:
+         y = np.real(
+             np.array([[E[i].conj() @ low_level_jit.contract(X, j) @ rho for j in ns.J] for i in range(ns.n_povm)])
+         )
+     else:
+         y = ns.y
+     X_array = np.zeros((bootstrap_samples, *X.shape)).astype(complex)
+     E_array = np.zeros((bootstrap_samples, *E.shape)).astype(complex)
+     rho_array = np.zeros((bootstrap_samples, *rho.shape)).astype(complex)
+     df_g_list = []
+     df_o_list = []
+
+     for i in range(bootstrap_samples):
+         y_sampled = additional_fns.sampled_measurements(y, ns.meas_samples).copy()
+         _, X_, E_, rho_, _ = algorithm.run_mGST(
+             y_sampled,
+             ns.J,
+             ns.l,
+             ns.d,
+             ns.r,
+             ns.rK,
+             ns.n_povm,
+             ns.bsize,
+             ns.meas_samples,
+             method=ns.method,
+             max_inits=ns.max_inits,
+             max_iter=0,
+             final_iter=ns.final_iter,
+             threshold_multiplier=ns.threshold_multiplier,
+             target_rel_prec=ns.target_rel_prec,
+             init=[K, E, rho],
+             testing=False,
+         )
+
+         X_opt, E_opt, rho_opt = gauge_opt(X_, E_, rho_, target_mdl, weights)
+         df_g, df_o = report(X_opt, E_opt, rho_opt, ns.J, y_sampled, target_mdl, gate_labels)
+         df_g_list.append(df_g.values)
+         df_o_list.append(df_o.values)
+
+         X_opt_pp, E_opt_pp, rho_opt_pp = compatibility.std2pp(X_opt, E_opt, rho_opt)
+
+         X_array[i, :] = X_opt_pp
+         E_array[i, :] = E_opt_pp
+         rho_array[i, :] = rho_opt_pp
+
+     return (X_array, E_array, rho_array, np.array(df_g_list), np.array(df_o_list))
+
+
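The stacked outputs lend themselves to percentile-based error bars. A hedged post-processing sketch, with all inputs assumed to be in scope from a prior mGST run (the column index 0 assumes the F_avg ordering produced by report above):

X_array, E_array, rho_array, df_g_array, df_o_array = bootstrap_errors(
    K, X, E, rho, mGST_args, 100, weights, gate_labels, target_mdl
)
low, high = np.percentile(df_g_array[:, :, 0], [2.5, 97.5], axis=0)  # 95% bootstrap interval of F_avg per gate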
+ def job_counts_to_mGST_format(self, result_dict):
+     """Turns the counts dictionaries obtained from a qiskit backend
+     into the format which is used in mGST
+
+     Parameters
+     ----------
+     self : object
+         Provides the active qubit indices (self.qubits) and the number of POVM elements (self.num_povm)
+     result_dict : list[dict[str, int]]
+         List of counts dictionaries, one per circuit
+
+     Returns
+     -------
+     y : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+
+     """
+     basis_dict_list = []
+     for result in result_dict:
+         # Translate dictionary entries of bitstrings on the full system to the decimal
+         # representation of bitstrings on the active qubits
+         basis_dict = {entry: int("".join([entry[::-1][i] for i in self.qubits][::-1]), 2) for entry in result}
+         # Sort by index:
+         basis_dict = dict(sorted(basis_dict.items(), key=lambda item: item[1]))
+         basis_dict_list.append(basis_dict)
+     y = []
+     for i in range(len(result_dict)):
+         row = [result_dict[i][key] for key in basis_dict_list[i]]
+         if len(row) < self.num_povm:
+             missing_entries = list(np.arange(self.num_povm))
+             for given_entry in basis_dict_list[i].values():
+                 missing_entries.remove(given_entry)
+             for missing_entry in missing_entries:
+                 row.insert(missing_entry, 0)  # 0 measurement outcomes in not recorded entry
+         y.append(row / np.sum(row))
+     y = np.array(y).T
+     return y
+
+
+ def save_var_latex(key, value):
+     """Saves a variable to a data file to be read in a latex document
+     Credit to https://stackoverflow.com/a/66620671
+
+     Parameters
+     ----------
+     key : str
+         The name of the variable
+     value : misc
+         The variable content, could be a string as in the name of the experiment, or the number of circuits run (int),
+         or any other python variable that needs to be transferred
+     """
+
+     dict_var = {}
+
+     file_path = os.path.join(os.getcwd(), "report/latex_vars.dat")
+
+     try:
+         with open(file_path, newline="", encoding="ASCII") as file:
+             reader = csv.reader(file)
+             for row in reader:
+                 dict_var[row[0]] = row[1]
+     except FileNotFoundError:
+         pass
+
+     dict_var[key] = value
+
+     with open(file_path, "w", encoding="ASCII") as f:
+         for key_, value_ in dict_var.items():
+             f.write(f"{key_},{value_}\n")
+
+
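Usage is one call per quantity; with the hard-coded path above this rewrites report/latex_vars.dat in the current working directory (the variable names below are made up for illustration, and the report/ directory must already exist):

save_var_latex("experiment_name", "GST_run_qubits_0_1")
save_var_latex("n_circuits", 1500)
# report/latex_vars.dat then contains lines like "experiment_name,GST_run_qubits_0_1" and "n_circuits,1500"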
+ def number_to_str(number, uncertainty=None, precision=3):
+     """Formats a floating point number to a string with the given precision.
+
+     Parameters:
+         number (float): The floating point number to format.
+         uncertainty (tuple): The upper and lower values of the confidence interval
+         precision (int): The number of decimal places to include in the formatted string.
+
+     Returns:
+         str: The formatted floating point number as a string.
+     """
+     if uncertainty is None:
+         return f"{number:.{precision}f}"
+
+     return f"{number:.{precision}f} [{uncertainty[1]:.{precision}f},{uncertainty[0]:.{precision}f}]"
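For example:

number_to_str(0.98765)                              # '0.988'
number_to_str(0.98765, uncertainty=(0.991, 0.984))  # '0.988 [0.984,0.991]'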