iqm-benchmarks 1.3 (iqm_benchmarks-1.3-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of iqm-benchmarks might be problematic.

Files changed (42)
  1. iqm/benchmarks/__init__.py +31 -0
  2. iqm/benchmarks/benchmark.py +109 -0
  3. iqm/benchmarks/benchmark_definition.py +264 -0
  4. iqm/benchmarks/benchmark_experiment.py +163 -0
  5. iqm/benchmarks/compressive_gst/__init__.py +20 -0
  6. iqm/benchmarks/compressive_gst/compressive_gst.py +1029 -0
  7. iqm/benchmarks/entanglement/__init__.py +18 -0
  8. iqm/benchmarks/entanglement/ghz.py +802 -0
  9. iqm/benchmarks/logging_config.py +29 -0
  10. iqm/benchmarks/optimization/__init__.py +18 -0
  11. iqm/benchmarks/optimization/qscore.py +719 -0
  12. iqm/benchmarks/quantum_volume/__init__.py +21 -0
  13. iqm/benchmarks/quantum_volume/clops.py +726 -0
  14. iqm/benchmarks/quantum_volume/quantum_volume.py +854 -0
  15. iqm/benchmarks/randomized_benchmarking/__init__.py +18 -0
  16. iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
  17. iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
  18. iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +19 -0
  19. iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +386 -0
  20. iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +19 -0
  21. iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +555 -0
  22. iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +19 -0
  23. iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +810 -0
  24. iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +86 -0
  25. iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +892 -0
  26. iqm/benchmarks/readout_mitigation.py +290 -0
  27. iqm/benchmarks/utils.py +521 -0
  28. iqm_benchmarks-1.3.dist-info/LICENSE +205 -0
  29. iqm_benchmarks-1.3.dist-info/METADATA +190 -0
  30. iqm_benchmarks-1.3.dist-info/RECORD +42 -0
  31. iqm_benchmarks-1.3.dist-info/WHEEL +5 -0
  32. iqm_benchmarks-1.3.dist-info/top_level.txt +2 -0
  33. mGST/LICENSE +21 -0
  34. mGST/README.md +54 -0
  35. mGST/additional_fns.py +962 -0
  36. mGST/algorithm.py +733 -0
  37. mGST/compatibility.py +238 -0
  38. mGST/low_level_jit.py +694 -0
  39. mGST/optimization.py +349 -0
  40. mGST/qiskit_interface.py +282 -0
  41. mGST/reporting/figure_gen.py +334 -0
  42. mGST/reporting/reporting.py +710 -0
mGST/additional_fns.py ADDED
@@ -0,0 +1,962 @@
+ """
+ Utility functions used by mGST modules
+ """
+
+ import os
+ import random
+ import warnings
+
+ import numpy as np
+ import numpy.linalg as la
+ from scipy.linalg import expm, qr
+
+ from mGST.low_level_jit import Mp_norm_lower, MVE_lower, contract, local_basis
+
+
+ def transp(dim1, dim2):
+     """Superoperator of a map that performs the transpose operation
+
+     Parameters
+     ----------
+     dim1 : int
+         First dimension of the matrix to be transposed
+     dim2 : int
+         Second dimension of the matrix to be transposed
+
+     Returns
+     -------
+     T: numpy array
+         Transpose superoperator that turns a dim1xdim2 matrix into a dim2xdim1 matrix
+
+     Notes:
+         The projection is done with respect to the canonical metric.
+     """
+     id1 = np.eye(dim1)
+     id2 = np.eye(dim2)
+     T = np.einsum("il,jk->ijkl", id2, id1).reshape(dim1 * dim2, dim1 * dim2)
+     return T
+
+
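A minimal usage sketch of transp (an editorial annotation, not part of the package file; it assumes mGST is importable so that the function above can be used): acting with the superoperator on a row-major flattened matrix returns the flattened transpose.

import numpy as np
from mGST.additional_fns import transp

A = np.arange(6).reshape(2, 3)      # a 2x3 matrix
T = transp(2, 3)                    # transpose superoperator for 2x3 matrices
# T maps vec(A) to vec(A^T), both in row-major (C) ordering
assert np.allclose(T @ A.reshape(-1), A.T.reshape(-1))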
+ def randpsd(n, normalized="True"):
+     """Generate a random positive semidefinite square matrix
+
+     Parameters
+     ----------
+     n : int
+         Number of matrix entries
+     normalized : {"True", "False"}, optional
+         Controls if the output is trace normalized, defaults to "True"
+
+     Returns
+     -------
+     mat: numpy array
+         Random positive semidefinite matrix, flattened to a vector of length n
+
+     Notes:
+         Eigenvalues are drawn uniformly from the interval [0,1);
+         The basis is generated by diagonalizing a random hermitian matrix (see randHerm function).
+     """
+     dim = int(np.sqrt(n))
+     H = randHerm(dim)
+     U, _, _ = np.linalg.svd(H)
+     evals = np.random.random_sample(dim)
+     if normalized == "True":
+         evals /= np.sum(evals)
+     mat = np.dot(np.dot(U, np.diag(evals)), U.T.conj())
+     mat = mat.reshape(-1)
+     return mat
+
+
+ def randvec(n):
+     """Generate a vector with real and imaginary parts drawn from the normal distribution
+
+     Parameters
+     ----------
+     n : int
+         Number of elements for the random vector
+
+     Returns
+     -------
+     g: 1D numpy array
+         Length n vector with complex entries whose real and imaginary part is independently drawn
+         from the normal distribution with mean 0 and variance 1; the result is normalized to unit norm.
+     """
+     # randn(n) produces a random vector of length n with mean 0 and variance 1
+     g = np.random.randn(n) + 1j * np.random.randn(n)
+     g = g / np.linalg.norm(g)
+     return g
+
+
+ def randHerm(n):
+     """Generate random square hermitian matrix
+
+     Parameters
+     ----------
+     n : int
+         Matrix dimension
+
+     Returns
+     -------
+     G: 2D numpy array
+         Random hermitian matrix normalized in spectral norm
+
+     Notes:
+         First a matrix with random complex entries is generated (see randvec function).
+         This matrix is then projected onto the space of hermitian
+         matrices and normalized in spectral norm.
+     """
+     G = randvec(n * n).reshape(n, n)
+     G = (G + G.T.conj()) / 2
+     # ord=2 gives the spectral norm
+     G = G / np.linalg.norm(G, ord=2)
+     return G
+
+
+ def randHermGS(d, r):
+     """Generates a random set of operators that are hermiticity preserving
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     r : int
+         Superoperator dimension of the gates given by the square of the physical dimension
+
+     Returns
+     -------
+     X: 3D numpy array
+         Array of shape (r, d, r) where the random hermiticity preserving operators are stacked along the second axis.
+
+     Notes:
+         The function randHerm is used to generate random hermitian Choi matrices,
+         whose indices are then rearranged to obtain random hermiticity preserving superoperators.
+     """
+     dim = int(np.sqrt(r))
+     X = np.zeros((r, d, r), dtype="complex")
+     for i in range(d):
+         H = randHerm(r).reshape(dim, dim, dim, dim)
+         H = np.einsum("ijkl->jlik", H)
+         X[:, i, :] = H.reshape(r, r)
+     return X
+
+
+ def randU(n, a=1):
+     """Generates random unitary from a random hermitian generator
+
+     Parameters
+     ----------
+     n : int
+         Matrix dimension of the unitary
+     a : float
+         Parameter to control the norm of the hermitian generator
+
+     Returns
+     -------
+     U: 2D numpy array
+         Matrix exponential of random hermitian matrix times the imaginary unit.
+     """
+     return expm(1j * a * randHerm(n)).astype(np.complex128)
+
+
+ def randU_Haar(n):
+     """Return a Haar distributed random unitary
+
+     Parameters
+     ----------
+     n : int
+         Matrix dimension of the unitary
+
+     Returns
+     -------
+     U: 2D numpy array
+         Random unitary matrix distributed according to the Haar measure.
+     """
+     Z = np.random.randn(n, n) + 1.0j * np.random.randn(n, n)
+     [Q, R] = qr(Z)
+     D = np.diag(np.diagonal(R) / np.abs(np.diagonal(R)))
+     return np.dot(Q, D)
+
+
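A quick check of randU_Haar (editorial annotation, not part of the file; assumes mGST is importable): the returned matrix should be unitary up to numerical precision.

import numpy as np
from mGST.additional_fns import randU_Haar

U = randU_Haar(4)
# Unitarity: U^dagger U equals the identity up to floating point error
assert np.allclose(U.conj().T @ U, np.eye(4))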
+ def randKrausSet(d, r, rK, a=1):
+     """Generates random set of Kraus operators
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     r : int
+         Superoperator dimension of the gates given by the square of the physical dimension
+     rK : int
+         Number of Kraus operators per gate ("Kraus rank")
+     a : float
+         Parameter to control the norm of the hermitian generator and thereby
+         how far the gates are from the identity
+
+     Returns
+     -------
+     K: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+
+     Notes:
+         Let pdim be the physical dimension. Then a set of Kraus operators is generated
+         by taking the first pdim columns of a random unitary of size rK*pdim.
+         The random unitary is generated from a random hermitian matrix.
+     """
+     pdim = int(np.sqrt(r))
+     K = np.zeros((d, rK, pdim, pdim)).astype(np.complex128)
+     for i in range(d):
+         K[i, :, :, :] += randU(pdim * rK, a)[:, :pdim].reshape(rK, pdim, pdim)
+     return K
+
+
+ def randKrausSet_Haar(d, r, rK):
+     """Generates random set of Kraus operators
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     r : int
+         Superoperator dimension of the gates given by the square of the physical dimension
+     rK : int
+         Number of Kraus operators per gate ("Kraus rank")
+
+     Returns
+     -------
+     K: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+
+     Notes:
+         Let pdim be the physical dimension. Then a set of Kraus operators is
+         generated by taking the first pdim columns of a random unitary of size
+         rK*pdim. The random unitary is generated according to the Haar measure.
+     """
+     pdim = int(np.sqrt(r))
+     K = np.zeros((d, rK, pdim, pdim)).astype(np.complex128)
+     for i in range(d):
+         K[i, :, :, :] += randU_Haar(pdim * rK)[:, :pdim].reshape(rK, pdim, pdim)
+     return K
+
+
+ def random_gs(d, r, rK, n_povm):
+     """Generates a random gate set using the Gaussian unitary ensemble, an initial state and a POVM
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     r : int
+         Superoperator dimension of the gates given by the square of the physical dimension
+     rK : int
+         Number of Kraus operators per gate ("Kraus rank")
+     n_povm : int
+         Number of POVM-Elements
+
+     Returns
+     -------
+     K: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+     X: 3D numpy array
+         Array where random CPT superoperators are stacked along the first axis.
+     E: 2D numpy array
+         Array of vectorized POVM elements.
+     rho: 1D numpy array
+         Vectorized initial state.
+
+     Notes:
+         The Kraus operators are generated from random unitaries, see function randKrausSet
+     """
+     K = randKrausSet(d, r, rK).copy()
+     X = np.einsum("ijkl,ijnm -> iknlm", K, K.conj()).reshape((d, r, r))
+     rho = randpsd(r).copy()
+     A = randKrausSet(1, r, n_povm)[0].conj()
+     E = np.array([(A[i].T.conj() @ A[i]).reshape(-1) for i in range(n_povm)]).copy()
+     return K, X, E, rho
+
+
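A small usage sketch (editorial annotation, not part of the file; assumes mGST is importable): draw a single-qubit gate set and check the basic physicality constraints that the construction guarantees.

import numpy as np
from mGST.additional_fns import random_gs

d, r, rK, n_povm = 3, 4, 2, 2       # 3 gates on 1 qubit (r = pdim**2 = 4), Kraus rank 2
K, X, E, rho = random_gs(d, r, rK, n_povm)
pdim = int(np.sqrt(r))

# Trace preservation: sum_j K_j^dagger K_j = identity for every gate
for i in range(d):
    assert np.allclose(np.einsum("jkl,jkm->lm", K[i].conj(), K[i]), np.eye(pdim))

# The POVM elements sum to the identity and the initial state has unit trace
assert np.allclose(np.sum(E, axis=0).reshape(pdim, pdim), np.eye(pdim))
assert np.isclose(np.trace(rho.reshape(pdim, pdim)), 1.0)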
+ def random_gs_Haar(d, r, rK, n_povm):
+     """Generates a random gate set with gates from Haar random unitaries, initial state and POVM
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     r : int
+         Superoperator dimension of the gates given by the square of the physical dimension
+     rK : int
+         Number of Kraus operators per gate ("Kraus rank")
+     n_povm : int
+         Number of POVM-Elements
+
+     Returns
+     -------
+     K: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+     X: 3D numpy array
+         Array where random CPT superoperators are stacked along the first axis.
+     E: 2D numpy array
+         Array of vectorized POVM elements.
+     rho: 1D numpy array
+         Vectorized initial state.
+
+     Notes:
+         The Kraus operators are generated from Haar random unitaries, see function randKrausSet_Haar
+     """
+     K = randKrausSet_Haar(d, r, rK).copy()
+     X = np.einsum("ijkl,ijnm -> iknlm", K, K.conj()).reshape((d, r, r))
+     rho = randpsd(r).copy()
+     A = randKrausSet_Haar(1, r, n_povm)[0].conj()
+     E = np.array([(A[i].T.conj() @ A[i]).reshape(-1) for i in range(n_povm)]).copy()
+     return K, X, E, rho
+
+
+ def perturbed_target_init(X_target, rK):
+     """Generates a small random noise gate around the identity and applies it to the target gate
+     The reason for using this gate as an initialization, as opposed to the target gate itself, is
+     that the non-dominant Kraus operators we start with are now not zero, but small random matrices.
+     Observations show that with this start, the non-dominant Kraus operators converge faster.
+
+     Parameters
+     ----------
+     X_target : 3D numpy array
+         Target gate set
+     rK : int
+         Number of Kraus operators per gate ("Kraus rank") for the initialization
+
+     Returns
+     -------
+     K_init: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+     """
+     d, r, _ = X_target.shape
+     pdim = int(np.sqrt(r))
+     K_perturb = randKrausSet(d, r, rK, a=0.1)
+     X_perturb = np.einsum("ijkl,ijnm -> iknlm", K_perturb, K_perturb.conj()).reshape((d, r, r))
+     X_init = np.einsum("ikl,ilm ->ikm", X_perturb, X_target)
+     K_init = Kraus_rep(X_init, d, pdim, rK)
+     return K_init
+
+
+ def basis(size, index):
+     """Creates standard basis vectors
+
+     Parameters
+     ----------
+     size : int
+         Vector space dimension
+     index : int
+         Index of basis vector
+
+     Returns
+     -------
+     vec: 1D numpy array
+         Vector with entry 1 at position given by index and zeros elsewhere
+     """
+     vec = np.zeros(size)
+     vec[index] = 1.0
+     return vec
+
+
+ def depol(pdim, p):
+     """Kraus representation of the depolarizing channel
+
+     Parameters
+     ----------
+     pdim : int
+         Physical dimension
+     p : float
+         Error probability
+
+     Returns
+     -------
+     K_depol: 3D numpy array
+         Array of Kraus operators of the depolarizing channel, stacked along the first axis.
+
+     Notes:
+         The depolarizing channel is defined as L(rho) = (1-p)*rho + p/pdim*Id.
+     """
+     phi_plus = np.sum([np.kron(basis(pdim, i), basis(pdim, i)) for i in range(pdim)], axis=0)
+     choi_state = p / pdim * np.eye(pdim**2) + (1 - p) * np.kron(phi_plus, phi_plus.reshape(pdim**2, 1))
+     K_depol = la.cholesky(choi_state)
+     return K_depol.reshape(pdim, pdim, pdim**2).swapaxes(0, 2)
+
+
+ def varassign(v, X, E, rho, argument):
+     """Assigns input to specified gate set variables
+
+     Parameters
+     ----------
+     v : numpy array
+         New set of variables
+     X : numpy array
+         Current gate estimate
+     E : numpy array
+         Current POVM estimate
+     rho : numpy array
+         Current initial state estimate
+     argument : {"X", "E", "rho"}
+         Which part of the gate set is updated
+
+     Returns
+     -------
+     [.,.,.]: 3 element list
+         List in the order [X,E,rho] where either X, E or rho is replaced by v,
+         depending on the input to the "argument" variable
+     """
+     if argument in ["X", "K"]:
+         return [v, E, rho]
+     if argument == "E":
+         return [X, v, rho]
+
+     return [X, E, v]
+
+
+ def batch(y, J, bsize):
+     """Returns a random batch of sequences and corresponding measurements
+
+     Parameters
+     ----------
+     y : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+     bsize : int
+         Size of the batch (number of sequences)
+
+     Returns
+     -------
+     y_b : numpy array
+         Randomly subsampled columns of y
+     J_b : numpy array
+         Randomly subsampled rows of J in accordance with the columns selected in y_b
+     """
+     if y.shape[1] <= bsize:
+         return y, J
+     if bsize < 1:  # if batch size is given as ratio
+         bsize = int(bsize * len(J) // 1)
+     batchmask = np.array([1] * bsize + [0] * (len(J) - bsize))
+     np.random.shuffle(batchmask)
+     J_b = J[batchmask == 1]
+     y_b = y[:, batchmask == 1]
+     return y_b, J_b
+
+
+ def F_avg_X(X, K):
+     """Returns the average gate fidelity between two gate sets, one given by
+     superoperators and one given by sets of Kraus operators
+
+     Parameters
+     ----------
+     X : 3D numpy array
+         CPT superoperators of size (physical dimension**2) x (physical dimension**2),
+         stacked along the first axis
+     K : 4D numpy array
+         Arrays of Kraus operators with size (Kraus rank) x (physical dimension) x (physical dimension),
+         stacked along the first axis
+
+     Returns
+     -------
+     Fid : float
+         Average gate fidelity, averaged over the gate set
+     Fid_list : numpy array
+         Average gate fidelities of the individual gates
+
+     Notes:
+         No gauge optimization involved; the average gate fidelity is gauge dependent.
+     """
+     pdim = K.shape[2]
+     d = K.shape[0]
+     Fid_list = []
+     for k in range(d):
+         choi_inner_prod = np.einsum("imjl,pml,pij", X[k].reshape(pdim, pdim, pdim, pdim), K[k], K[k].conj())
+
+         unitality_term = np.einsum("imkk,pml,pil", X[k].reshape(pdim, pdim, pdim, pdim), K[k], K[k].conj())
+         Fid = (choi_inner_prod + unitality_term) / pdim / (pdim + 1)
+         Fid_list.append(Fid)
+     return np.average(np.real(Fid_list)), np.real(Fid_list)
+
+
+ def MVE(X_true, E_true, rho_true, X, E, rho, d, length, n_povm, samples=10000):
+     """Mean variation error between the outputs of two gate sets on random sequences
+
+     Parameters
+     ----------
+     X_true : numpy array
+         Target gates
+     E_true : numpy array
+         Target POVM
+     rho_true : numpy array
+         Target initial state
+     X : numpy array
+         Current gate estimate
+     E : numpy array
+         Current POVM estimate
+     rho : numpy array
+         Current initial state estimate
+     d : int
+         Number of different gates in the gate set
+     length : int
+         Length of the test sequences
+     n_povm : int
+         Number of POVM elements
+     samples : int
+         Number of random gate sequences over which the mean variation error is computed
+
+     Returns
+     -------
+     MVE : float
+         Mean variation error
+
+     Notes:
+         Sequences are drawn at random from the d**length possibilities.
+         For each sequence the total variation error of the two probability distributions
+         over the POVM elements is computed. Afterwards the mean over these total
+         variation errors is returned.
+     """
+     if samples == "all" or np.log(samples) / np.log(d) > length:
+         J = np.random.randint(0, d, length * d**length).reshape(d**length, length)
+     else:
+         J = np.random.randint(0, d, length * samples).reshape(samples, length)
+     return MVE_lower(X_true, E_true, rho_true, X, E, rho, J, n_povm)
+
+
+ def Mp_norm(X_true, E_true, rho_true, X, E, rho, d, length, n_povm, p, samples=10000):
+     """Mean of the p-norm deviation between the outputs of two gate sets on random sequences
+
+     Parameters
+     ----------
+     X_true : numpy array
+         Target gates
+     E_true : numpy array
+         Target POVM
+     rho_true : numpy array
+         Target initial state
+     X : numpy array
+         Current gate estimate
+     E : numpy array
+         Current POVM estimate
+     rho : numpy array
+         Current initial state estimate
+     d : int
+         Number of different gates in the gate set
+     length : int
+         Length of the test sequences
+     n_povm : int
+         Number of POVM elements
+     p : int
+         Defines the l_p - norm that is used to compare probability distributions
+     samples : int
+         Number of random gate sequences over which the mean variation error is computed
+
+     Returns
+     -------
+     MPE : float
+         Mean l_p - norm error
+
+     Notes:
+         Sequences are drawn at random from the d**length possibilities.
+         For each sequence the l_p - norm error of the two probability distributions
+         over the POVM elements is computed. Afterwards the mean over these
+         errors is returned.
+
+     """
+     if samples == "all" or np.log(samples) / np.log(d) > length:
+         J = np.random.randint(0, d, length * d**length).reshape(d**length, length)
+     else:
+         J = np.random.randint(0, d, length * samples).reshape(samples, length)
+     return Mp_norm_lower(X_true, E_true, rho_true, X, E, rho, J, n_povm, p)
+
+
+ def Kraus_rep(X, d, pdim, rK):
+     """Compute the Kraus representations for all gates in the gate set
+
+     Parameters
+     ----------
+     X : numpy array
+         Current gate estimate
+     d : int
+         Number of gates
+     pdim : int
+         Physical dimension
+     rK : int
+         Target Kraus rank
+
+     Returns
+     -------
+     K: 4D numpy array
+         Each subarray along the first axis contains a set of Kraus operators.
+         The second axis enumerates Kraus operators for a gate specified by the first axis.
+
+     Notes:
+         The Kraus representation is obtained from an eigendecomposition
+         of the Choi matrix. If parameter rK is smaller than the true rank of the
+         Choi matrix, a rank rK approximation is used. Approximations are only CPT
+         in the special case where the original gate was already of rank <= rK.
+     """
+     X_choi = X.reshape(d, pdim, pdim, pdim, pdim)
+     X_choi = np.einsum("ijklm->iljmk", X_choi).reshape((d, pdim**2, pdim**2))
+     K = np.zeros((d, rK, pdim, pdim)).astype(np.complex128)
+     for i in range(d):
+         w, v = la.eigh(X_choi[i])
+         if np.min(w) < -1e-12:
+             raise ValueError("Choi matrix is not positive semidefinite within tolerance 1e-12")
+         K[i] = np.einsum("ijk->kji", (v[:, -rK:] @ np.diag(np.sqrt(np.abs(w[-rK:])))).reshape(pdim, pdim, rK))
+         # Trace normalization of the Choi matrix
+         K[i] = K[i] / np.sqrt(np.einsum("ijk,ijk", K[i], K[i].conj())) * np.sqrt(pdim)
+     return np.array(K)
+
+
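A round-trip sketch for Kraus_rep (editorial annotation, not part of the file; assumes mGST is importable): decompose the superoperators of a random rank-rK gate set into Kraus operators and rebuild them.

import numpy as np
from mGST.additional_fns import Kraus_rep, random_gs

d, pdim, rK = 2, 2, 2
K, X, E, rho = random_gs(d, pdim**2, rK, n_povm=2)

K_rec = Kraus_rep(X, d, pdim, rK)
# Rebuild the superoperators from the recovered Kraus operators (same contraction as in random_gs);
# for gates of Kraus rank <= rK the reconstruction agrees up to numerical precision.
X_rec = np.einsum("ijkl,ijnm->iknlm", K_rec, K_rec.conj()).reshape((d, pdim**2, pdim**2))
assert np.allclose(X_rec, X)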
+ def sampled_measurements(y, n):
+     """Compute finite sample estimates of input probabilities
+
+     Parameters
+     ----------
+     y : numpy array
+         2D array of measurement outcomes for different sequences;
+         Each column contains the outcome probabilities for a fixed sequence
+     n : int
+         Number of samples for each experiment
+
+     Returns
+     -------
+     y_sampled : numpy array
+         2D array of sampled measurement outcomes for different sequences;
+         Each column contains the sampled outcome probabilities for a fixed sequence
+
+     Notes:
+         The entries of each column of y form a probability distribution.
+         From this distribution n random samples are drawn which give estimates for
+         the initial probabilities. This simulates finite sample size data.
+     """
+     n_povm, m = y.shape
+     if any(y.reshape(-1) > 1) or any(y.reshape(-1) < 0):
+         y_new = np.maximum(np.minimum(y, 1), 0)
+         if np.sum(np.abs(y_new - y)) > 1e-6:
+             warnings.warn(
+                 "Probabilities capped to interval [0,1]; "
+                 "l1-difference to input: %f" % np.sum(np.abs(y_new - y))
+             )
+         y = y_new
+     rng = np.random.default_rng()
+     y_sampled = np.zeros(y.shape)
+     for i in range(m):
+         y_sampled[:, i] = rng.multinomial(n, [y[o, i] for o in range(n_povm)]) / n
+     return y_sampled
+
+
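A brief usage sketch (editorial annotation, not part of the file; assumes mGST is importable): simulate shot noise on a set of exact outcome probabilities.

import numpy as np
from mGST.additional_fns import sampled_measurements

# Two POVM outcomes, three circuits; each column is an exact outcome distribution
y_exact = np.array([[0.9, 0.5, 0.2],
                    [0.1, 0.5, 0.8]])
y_noisy = sampled_measurements(y_exact, 1000)   # simulate 1000 shots per circuit
# Columns still sum to 1, but the entries now fluctuate around y_exact
print(y_noisy)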
+ def random_len_seq(d, min_l, max_l, N):
+     """Generate random gate sequence instructions which contain sequences of different lengths;
+     the lengths are drawn uniformly at random from (min_l, ..., max_l)
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     min_l : int
+         Minimum sequence length
+     max_l : int
+         Maximum sequence length
+     N : int
+         Number of random sequences
+
+     Returns
+     -------
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+
+     """
+     seq_lengths = np.random.randint(min_l, max_l + 1, N)
+     J = []
+     for length in seq_lengths:
+         j_curr = np.random.randint(0, d, length)
+         J.append(list(np.pad(j_curr, (0, max_l - length), "constant", constant_values=-1)))
+     return np.array(J)
+
+
+ def generate_fids(d, length, m_f):
+     """Generate random fiducial sequences
+
+     Parameters
+     ----------
+     d : int
+         Number of gates
+     length : int
+         Total sequence length
+     m_f : int
+         Number of random fiducial sequences
+
+     Returns
+     -------
+     J_fid : numpy array
+         Sequence list of only the fiducial sequences
+     J_fid2 : numpy array
+         Sequence list for all combinations of two concatenated fiducial sequences
+     J_meas : numpy array
+         Sequence list for all combinations of fiducial sequences with a gate
+         in between: fiducial1 -- gate -- fiducial2
+     """
+     fid_length = (length - 1) // 2
+     fid = random.sample(range(d**fid_length), m_f).copy()
+     J_fid = [list(local_basis(ind, d, fid_length)) for ind in fid]
+     J_fid2 = np.array([seqL + seqR for seqL in J_fid for seqR in J_fid])
+     J_meas = np.zeros((d, m_f**2, length), dtype="int")
+     for k in range(d):
+         J_meas[k] = np.array([seqL + [k] + seqR for seqL in J_fid for seqR in J_fid])
+     return np.array(J_fid), J_fid2, J_meas
+
+
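A shape sketch for generate_fids (editorial annotation, not part of the file; assumes mGST is importable; the parameter values below are chosen only for illustration):

from mGST.additional_fns import generate_fids

d, length, m_f = 3, 7, 4            # 3 gates, total depth 7, 4 fiducials of depth (7 - 1) // 2 = 3
J_fid, J_fid2, J_meas = generate_fids(d, length, m_f)
print(J_fid.shape)    # (4, 3)      the fiducial sequences themselves
print(J_fid2.shape)   # (16, 6)     every pair of concatenated fiducials
print(J_meas.shape)   # (3, 16, 7)  fiducial -- gate -- fiducial for each of the 3 gates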
+ def is_positive(X, E, rho):
+     """Print the results of checks of whether a gate set is physical.
+
+     This includes all positivity and normalization constraints.
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     """
+     d, r, _ = X.shape
+     pdim = int(np.sqrt(r))
+     n_povm = E.shape[0]
+
+     X_choi = X.reshape(d, pdim, pdim, pdim, pdim)
+     X_choi = np.einsum("ijklm->iljmk", X_choi).reshape((d, r, r))
+
+     eigvals = np.array([la.eigvals(X_choi[i]) for i in range(d)])
+     partial_traces = np.einsum("aiikl -> akl", X.reshape(d, pdim, pdim, pdim, pdim))
+     povm_eigvals = np.array([la.eigvals(E[i].reshape(pdim, pdim)) for i in range(n_povm)])
+     if np.any(np.imag(eigvals.reshape(-1)) > 1e-10):
+         print("Gates are not all hermitian.")
+     else:
+         for i in range(d):
+             print("Gate %i positive:" % i, np.all(eigvals[i, :] > -1e-10))
+             print("Gate %i trace preserving:" % i, la.norm(partial_traces[i] - np.eye(pdim)) < 1e-10)
+     print("Initial state positive:", np.all(la.eigvals(rho.reshape(pdim, pdim)) > -1e-10))
+     print("Initial state normalization:", np.trace(rho.reshape(pdim, pdim)))
+     print(
+         "POVM valid:",
+         np.all(
+             [
+                 la.norm(np.sum(E, axis=0).reshape(pdim, pdim) - np.eye(pdim)) < 1e-10,
+                 np.all(povm_eigvals.reshape(-1) > -1e-10),
+             ]
+         ),
+     )
+
+
+ def tvd(X, E, rho, J, y_data):
+     """Return the total variation distance between model probabilities for the circuits in J
+     and the probabilities given by y_data.
+
+     Parameters
+     ----------
+     X : numpy array
+         Gate set
+     E : numpy array
+         POVM
+     rho : numpy array
+         Initial state
+     y_data : numpy array
+         2D array of measurement outcomes for sequences in J;
+         Each column contains the outcome probabilities for a fixed sequence
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+
+     Returns
+     -------
+     dist : float
+         The total variation distance.
+     """
+     n_povm = y_data.shape[0]
+     y_model = np.real(np.array([[E[i].conj() @ contract(X, j) @ rho for j in J] for i in range(n_povm)]))
+     dist = la.norm(y_model - y_data, ord=1) / 2
+     return dist
+
+
+ def random_seq_design(
+     d, l_min, l_cut, l_max, N_short, N_long
+ ):  # Draws without replacement but inefficiently (does not work for sequence lengths > 24)
+     """Generate a set of random sequences with given lengths
+     The sequence lengths (circuit depths) are chosen as a mix of very short sequences (better convergence) and some
+     slightly longer sequences to reduce the generalization error.
+     This sequence design is heuristic and intended for coarse and fast estimates. For very accurate estimates at the
+     cost of higher measurement effort it is recommended to use pyGSTi with long sequence GST.
+
+     Parameters
+     ----------
+     d : int
+         The number of gates in the gate set
+     l_min : int
+         Minimum sequence length
+     l_cut : int
+         Cutoff sequence length: N_short sequences are equally distributed among lengths l_min <= l <= l_cut
+     l_max : int
+         N_long sequences are equally distributed among lengths l_cut + 1 <= l <= l_max. Currently l_max < 24 only.
+     N_short : int
+         Number of short sequences
+     N_long : int
+         Number of long sequences
+
+     Returns
+     -------
+     J : numpy array
+         2D array where each row contains the gate indices of a gate sequence
+
+     """
+     # Open problems:
+     # - Change randomness to work with longer sequences;
+     # - Handle case where the number of sequences is smaller than the available lengths
+     if l_max >= 24:
+         raise ValueError("Currently only sequence lengths < 24 are supported.")
+
+     J = np.array([-np.ones(l_max)])
+     # Short sequences:
+     seq_counts = []
+     N_remaining = N_short
+     for l in range(l_min, l_cut + 1):
+         seq_counts.append(int(np.min([np.floor(N_remaining / (l_cut + 1 - l)), d**l])))
+         if seq_counts[-1] > 0:
+             ind_curr = np.array(random.sample(range(d**l), seq_counts[-1]))
+             J_curr = np.array(
+                 [np.pad(local_basis(ind, d, l), (0, l_max - l), "constant", constant_values=-1) for ind in ind_curr]
+             )
+             J = np.concatenate((J, J_curr), axis=0)
+         N_remaining = N_short - 1 - np.sum(seq_counts)
+     if N_remaining > 0:
+         print(
+             "Number of possible sequences without replacement for the given sequence "
+             "length range is lower than the desired total number of sequences"
+         )
+
+     # Long sequences:
+     seq_counts = []
+     N_remaining = N_long
+     for l in range(l_cut + 1, l_max + 1):
+         seq_counts.append(int(np.min([np.floor(N_remaining / (l_max + 1 - l)), d**l])))
+         if seq_counts[-1] > 0:
+             ind_curr = np.array(random.sample(range(d**l), seq_counts[-1]))
+             J_curr = np.array(
+                 [np.pad(local_basis(ind, d, l), (0, l_max - l), "constant", constant_values=-1) for ind in ind_curr]
+             )
+             J = np.concatenate((J, J_curr), axis=0)
+         N_remaining = N_long - np.sum(seq_counts)
+     return J.astype(int)
+
+
+ def n_params(pdim, d, rK, n_povm):
+     """Returns the number of free parameters in a gate set
+     Unitary gauge parameters and unitary freedom in representing the Kraus operators and the POVM elements are
+     subtracted. For details, see
+     https://docserv.uni-duesseldorf.de/servlets/DerivateServlet/Derivate-71735/dissertation_brieger-2%281%29.pdf
+
+     Parameters
+     ----------
+     pdim : int
+         The physical dimension, i.e. 2^(#qubits)
+     d : int
+         The number of gates in the gate set
+     rK : int
+         The Kraus rank of the gates
+     n_povm : int
+         The number of POVM elements; For computational basis measurements n_povm = pdim
+
+     Returns
+     -------
+     n_params : int
+         The number of free parameters
+     """
+     # Order: gates + state + POVM - POVM freedom - gauge freedom - Kraus freedom
+     return d * (pdim**2 * (2 * rK - 1) - rK**2) + pdim * (n_povm * pdim - pdim)
+
+
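A quick worked example of the parameter count (editorial annotation; the values simply plug into the formula above):

from mGST.additional_fns import n_params

# Single qubit (pdim = 2), 3 gates of Kraus rank 1, computational-basis POVM (n_povm = 2):
# 3 * (4 * (2*1 - 1) - 1**2) + 2 * (2*2 - 2) = 3 * 3 + 2 * 2 = 13
print(n_params(pdim=2, d=3, rK=1, n_povm=2))   # 13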
+ def multikron(matrix_array):
+     """Computes the Kronecker product of all matrices in an array
+     The order is matrix_array[0] otimes matrix_array[1] otimes ...
+
+     Parameters
+     ----------
+     matrix_array: numpy array
+         An array containing matrices
+
+     Returns
+     -------
+     res: numpy array
+         The resulting tensor product (potentially a very large array)
+     """
+     res = matrix_array[0]
+     for i in range(1, matrix_array.shape[0]):
+         res = np.kron(res, matrix_array[i])
+     return res
+
+
+ def local_dephasing_pp(prob_vec):
+     """Returns the tensor product of single-qubit dephasing channels in the Pauli basis
+
+     Parameters
+     ----------
+     prob_vec : list[float]
+         A list of dephasing probabilities
+
+     Returns
+     -------
+     D_final : numpy array
+         Process matrix of the tensor product of local dephasing channels
+
+     """
+     D_loc_array = np.array([[[1, 0, 0, 0], [0, 1 - 2 * p, 0, 0], [0, 0, 1 - 2 * p, 0], [0, 0, 0, 1]] for p in prob_vec])
+     D_final = multikron(D_loc_array)
+     return D_final
+
+
+ def outcome_probs_from_files(folder_name, basis_dict, n_povm, N):
+     """Searches a specified folder for .txt files containing circuit outcomes and combines the results
+     Each text file needs to have lines in the following format:
+     1: 1,0,1,0,1,1,0,0,0
+     2: 1,1,1,1,0,1,1,1,1
+     Here the first number specifies the circuit and the basis outcome of each shot is written to the right.
+
+     Parameters
+     ----------
+     folder_name : str
+         The relative or absolute name of the data folder
+     basis_dict : dict[str: int]
+         Translation between the label for each shot in the .txt files and the numbering of the POVM elements.
+         Example (two qubits): {"00": 0, "01": 1, "10": 2, "11": 3,}
+     n_povm : int
+         The number of POVM elements in the data set
+     N : int
+         The number of circuits in the data set
+
+     Returns
+     -------
+     y : numpy array
+         2D array of measurement outcomes for all measured sequences;
+         Each column contains the outcome probabilities for a fixed sequence
+     avg_counts : int
+         The average number of shots per circuit after combining data sets
+     """
+     filenames = os.listdir(path=folder_name)
+     datafile_names = [s for s in filenames if ".txt" in s]
+
+     # array of outcome probabilities:
+     y = np.zeros((n_povm, N))
+     sample_counts = np.zeros((len(datafile_names), N))
+     k = 0
+     for filename in datafile_names:
+         with open(folder_name + "/" + filename, encoding="ASCII") as file:
+             i = 0
+             for line in file:
+                 # removing row index at beginning and \n mark at end
+                 line_entries = line.rstrip().split(": ")
+                 # splitting result string at commas
+                 result_list = line_entries[1:][0].split(",")
+                 # translate each measurement result onto basis index
+                 for entry in result_list:
+                     j = basis_dict[entry]
+                     y[j, i] += 1
+                 sample_counts[k, i] = len(result_list)
+                 i += 1
+         k += 1
+     total_counts = np.sum(sample_counts, axis=0)
+     avg_counts = int(np.average(total_counts))
+     return y / total_counts, avg_counts
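A usage sketch for the data loader (editorial annotation, not part of the file; the folder name, qubit count and circuit number below are placeholder assumptions):

from mGST.additional_fns import outcome_probs_from_files

# Hypothetical single-qubit data set: each line of every .txt file looks like "1: 0,1,0,0,1,..."
basis_dict = {"0": 0, "1": 1}
y, avg_counts = outcome_probs_from_files("my_gst_data", basis_dict, n_povm=2, N=100)
print(y.shape, avg_counts)   # (2, 100) outcome probabilities and the average number of shots per circuit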