qoro-divi 0.2.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. divi/__init__.py +8 -0
  2. divi/_pbar.py +73 -0
  3. divi/circuits.py +139 -0
  4. divi/exp/cirq/__init__.py +7 -0
  5. divi/exp/cirq/_lexer.py +126 -0
  6. divi/exp/cirq/_parser.py +889 -0
  7. divi/exp/cirq/_qasm_export.py +37 -0
  8. divi/exp/cirq/_qasm_import.py +35 -0
  9. divi/exp/cirq/exception.py +21 -0
  10. divi/exp/scipy/_cobyla.py +342 -0
  11. divi/exp/scipy/pyprima/LICENCE.txt +28 -0
  12. divi/exp/scipy/pyprima/__init__.py +263 -0
  13. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  14. divi/exp/scipy/pyprima/cobyla/cobyla.py +599 -0
  15. divi/exp/scipy/pyprima/cobyla/cobylb.py +849 -0
  16. divi/exp/scipy/pyprima/cobyla/geometry.py +240 -0
  17. divi/exp/scipy/pyprima/cobyla/initialize.py +269 -0
  18. divi/exp/scipy/pyprima/cobyla/trustregion.py +540 -0
  19. divi/exp/scipy/pyprima/cobyla/update.py +331 -0
  20. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  21. divi/exp/scipy/pyprima/common/_bounds.py +41 -0
  22. divi/exp/scipy/pyprima/common/_linear_constraints.py +46 -0
  23. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +64 -0
  24. divi/exp/scipy/pyprima/common/_project.py +224 -0
  25. divi/exp/scipy/pyprima/common/checkbreak.py +107 -0
  26. divi/exp/scipy/pyprima/common/consts.py +48 -0
  27. divi/exp/scipy/pyprima/common/evaluate.py +101 -0
  28. divi/exp/scipy/pyprima/common/history.py +39 -0
  29. divi/exp/scipy/pyprima/common/infos.py +30 -0
  30. divi/exp/scipy/pyprima/common/linalg.py +452 -0
  31. divi/exp/scipy/pyprima/common/message.py +336 -0
  32. divi/exp/scipy/pyprima/common/powalg.py +131 -0
  33. divi/exp/scipy/pyprima/common/preproc.py +393 -0
  34. divi/exp/scipy/pyprima/common/present.py +5 -0
  35. divi/exp/scipy/pyprima/common/ratio.py +56 -0
  36. divi/exp/scipy/pyprima/common/redrho.py +49 -0
  37. divi/exp/scipy/pyprima/common/selectx.py +346 -0
  38. divi/interfaces.py +25 -0
  39. divi/parallel_simulator.py +258 -0
  40. divi/qasm.py +220 -0
  41. divi/qem.py +191 -0
  42. divi/qlogger.py +119 -0
  43. divi/qoro_service.py +343 -0
  44. divi/qprog/__init__.py +13 -0
  45. divi/qprog/_graph_partitioning.py +619 -0
  46. divi/qprog/_mlae.py +182 -0
  47. divi/qprog/_qaoa.py +440 -0
  48. divi/qprog/_vqe.py +275 -0
  49. divi/qprog/_vqe_sweep.py +144 -0
  50. divi/qprog/batch.py +235 -0
  51. divi/qprog/optimizers.py +75 -0
  52. divi/qprog/quantum_program.py +493 -0
  53. divi/utils.py +116 -0
  54. qoro_divi-0.2.0b1.dist-info/LICENSE +190 -0
  55. qoro_divi-0.2.0b1.dist-info/LICENSES/Apache-2.0.txt +73 -0
  56. qoro_divi-0.2.0b1.dist-info/METADATA +57 -0
  57. qoro_divi-0.2.0b1.dist-info/RECORD +58 -0
  58. qoro_divi-0.2.0b1.dist-info/WHEEL +4 -0
divi/exp/scipy/pyprima/common/linalg.py
@@ -0,0 +1,452 @@
+ """
+ This module provides some basic linear algebra procedures.
+
+ Translated from Zaikun Zhang's modern-Fortran reference implementation in PRIMA.
+
+ Dedicated to the late Professor M. J. D. Powell FRS (1936--2015).
+
+ Python translation by Nickolai Belakovski.
+ """
+
+ import numpy as np
+
+ from .consts import DEBUGGING, EPS, REALMAX, REALMIN
+ from .present import present
+
+ # We use naive implementations of matrix multiplication and other routines for two
+ # reasons:
+ # 1. When Fortran is compiled in debug mode, and Python is using these routines, we
+ #    can get bit-for-bit identical results as compared to Fortran. This is helpful
+ #    for comparing the two implementations, and it will be particularly helpful when
+ #    porting the other implementations, such as LINCOA.
+ # 2. On some problems this algorithm is very sensitive to errors in finite-precision
+ #    arithmetic. Switching to the naive implementations will slow down the algorithm,
+ #    but may be more stable.
+ USE_NAIVE_MATH = False
+
+
+ def inprod(x, y):
+     if not USE_NAIVE_MATH:
+         return np.dot(x, y)
+     result = 0
+     for i in range(len(x)):
+         result += x[i] * y[i]
+     return result
+
+
+ def matprod12(x, y):
+     result = np.zeros(y.shape[1])
+     for i in range(y.shape[1]):
+         result[i] = inprod(x, y[:, i])
+     return result
+
+
+ def matprod21(x, y):
+     result = np.zeros(x.shape[0])
+     for i in range(x.shape[1]):
+         result += x[:, i] * y[i]
+     return result
+
+
+ def matprod22(x, y):
+     result = np.zeros((x.shape[0], y.shape[1]))
+     for i in range(y.shape[1]):
+         for j in range(x.shape[1]):
+             result[:, i] += x[:, j] * y[j, i]
+     return result
+
+
+ def matprod(x, y):
+     if not USE_NAIVE_MATH:
+         return x @ y
+     if len(x.shape) == 1 and len(y.shape) == 1:
+         return inprod(x, y)
+     elif len(x.shape) == 1 and len(y.shape) == 2:
+         return matprod12(x, y)
+     elif len(x.shape) == 2 and len(y.shape) == 1:
+         return matprod21(x, y)
+     elif len(x.shape) == 2 and len(y.shape) == 2:
+         return matprod22(x, y)
+     else:
+         raise ValueError(f"Invalid shapes for x and y: {x.shape} and {y.shape}")
+
+
+ def outprod(x, y):
+     if not USE_NAIVE_MATH:
+         return np.outer(x, y)
+     result = np.zeros((len(x), len(y)))
+     for i in range(len(y)):
+         result[:, i] = x * y[i]
+     return result
+
+
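Illustrative sketch, not part of the packaged file: with USE_NAIVE_MATH left at its default of False, matprod simply defers to NumPy's @ operator, while flipping the module-level flag routes the same call through the loop-based matprod22; the two paths should agree up to round-off. The import path is an assumption taken from the file list above.

import numpy as np

from divi.exp.scipy.pyprima.common import linalg

A = np.random.rand(4, 3)
B = np.random.rand(3, 5)

fast = linalg.matprod(A, B)           # default path: A @ B

linalg.USE_NAIVE_MATH = True          # module-level switch to the naive loops
naive = linalg.matprod(A, B)          # dispatched to matprod22
linalg.USE_NAIVE_MATH = False

print(np.max(np.abs(fast - naive)))   # expected to be 0 or ~1e-16
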
+ def lsqr(A, b, Q, Rdiag):
+     if not USE_NAIVE_MATH:
+         return np.linalg.lstsq(A, b, rcond=None)[0]
+
+     m = A.shape[0]
+     n = A.shape[1]
+
+     rank = min(m, n)
+
+     x = np.zeros(n)
+     y = b.copy()
+
+     for i in range(rank - 1, -1, -1):
+         yq = inprod(y, Q[:, i])
+         yqa = inprod(np.abs(y), np.abs(Q[:, i]))
+         if isminor(yq, yqa):
+             x[i] = 0
+         else:
+             x[i] = yq / Rdiag[i]
+             y = y - x[i] * A[:, i]
+     return x
+
+
+ def hypot(x1, x2):
+     if not USE_NAIVE_MATH:
+         return np.hypot(x1, x2)
+     if not np.isfinite(x1):
+         r = abs(x1)
+     elif not np.isfinite(x2):
+         r = abs(x2)
+     else:
+         y = abs(np.array([x1, x2]))
+         y = np.array([min(y), max(y)])
+         if y[0] > np.sqrt(REALMIN) and y[1] < np.sqrt(REALMAX / 2.1):
+             r = np.sqrt(sum(y * y))
+         elif y[1] > 0:
+             r = y[1] * np.sqrt((y[0] / y[1]) * (y[0] / y[1]) + 1)
+         else:
+             r = 0
+     return r
+
+
+ def norm(x):
+     if not USE_NAIVE_MATH:
+         return np.linalg.norm(x)
+     # NOTE: Avoid np.pow! And exponentiation in general!
+     # It appears that in Fortran, x*x and x**2 are the same, but in Python they are not!
+     # Try it with x = 5 - 1e-15.
+     result = np.sqrt(sum([xi * xi for xi in x]))
+     return result
+
+
+ def istril(A, tol=0):
+     return primasum(abs(A) - np.tril(abs(A))) <= tol
+
+
+ def istriu(A, tol=0):
+     return primasum(abs(A) - np.triu(abs(A))) <= tol
+
+
+ def inv(A):
+     if not USE_NAIVE_MATH:
+         return np.linalg.inv(A)
+     A = A.copy()
+     n = A.shape[0]
+     if istril(A):
+         # This case is invoked in COBYLA.
+         R = A.T
+         B = np.zeros((n, n))
+         for i in range(n):
+             B[i, i] = 1 / R[i, i]
+             B[:i, i] = -matprod(B[:i, :i], R[:i, i]) / R[i, i]
+         return B.T
+     elif istriu(A):
+         B = np.zeros((n, n))
+         for i in range(n):
+             B[i, i] = 1 / A[i, i]
+             B[:i, i] = -matprod(B[:i, :i], A[:i, i]) / A[i, i]
+     else:
+         # This is NOT the best algorithm for the inverse, but since the QR subroutine is available ...
+         Q, R, P = qr(A)
+         R = R.T
+         B = np.zeros((n, n))
+         for i in range(n - 1, -1, -1):
+             B[:, i] = (Q[:, i] - matprod(B[:, i + 1 : n], R[i + 1 : n, i])) / R[i, i]
+         InvP = np.zeros(n, dtype=int)
+         InvP[P] = np.linspace(0, n - 1, n)
+         B = B[:, InvP].T
+     return B
+
+
+ def qr(A):
+     m = A.shape[0]
+     n = A.shape[1]
+
+     Q = np.eye(m)
+     T = A.T
+     P = np.linspace(0, n - 1, n, dtype=int)
+
+     for j in range(n):
+         k = np.argmax(primasum(primapow2(T[j : n + 1, j : m + 1]), axis=1), axis=0)
+         if k > 0 and k <= n - j - 1:
+             k += j
+             P[j], P[k] = P[k], P[j]
+             T[[j, k], :] = T[[k, j], :]
+         for i in range(m - 1, j, -1):
+             G = planerot(T[j, [j, i]]).T
+             T[j, [j, i]] = np.append(hypot(T[j, j], T[j, i]), 0)
+             T[j + 1 : n + 1, [j, i]] = matprod(T[j + 1 : n + 1, [j, i]], G)
+             Q[:, [j, i]] = matprod(Q[:, [j, i]], G)
+
+     R = T.T
+
+     return Q, R, P
+
+
+ def primasum(x, axis=None):
+     """
+     According to its documentation, np.sum will sometimes do partial pairwise summation.
+     For our purposes, when comparing against the Fortran implementation, we don't want to
+     do anything fancy; we just want to add things up one at a time.
+     """
+     if not USE_NAIVE_MATH:
+         return np.sum(x, axis=axis)
+     if axis is None:
+         if x.ndim == 2:
+             # Sum columns first, then sum the result.
+             return sum(primasum(x, axis=0))
+         else:
+             return sum(x)
+     elif axis == 0:
+         result = np.zeros(x.shape[1])
+         for i in range(x.shape[1]):
+             result[i] = sum(x[:, i])
+         return result
+     elif axis == 1:
+         result = np.zeros(x.shape[0])
+         for i in range(x.shape[0]):
+             result[i] = sum(x[i, :])
+         return result
+
+
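Illustrative sketch, not part of the packaged file and assuming the definitions above are in scope: on a 2-D array the naive branch of primasum produces the same totals as np.sum, just accumulated one element at a time.

import numpy as np

M = np.arange(6.0).reshape(2, 3)   # [[0., 1., 2.], [3., 4., 5.]]
primasum(M)                        # 15.0 (naive path: column sums, then their sum)
primasum(M, axis=0)                # array([3., 5., 7.])
primasum(M, axis=1)                # array([ 3., 12.])
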
+ def primapow2(x):
+     """
+     Believe it or not, x**2 is not always the same as x*x in Python. In Fortran they
+     appear to be identical. Here's a quick one-liner to find an example on your system
+     (well, a two-liner after importing numpy):
+     list(filter(lambda x: x[1], [(x:=np.random.random(), x**2 - x*x != 0) for _ in range(10000)]))
+     """
+     return x * x
+
+
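Illustrative sketch, not part of the packaged file and assuming the definitions above are in scope: the discrepancy described in the docstring can be checked directly, though whether the difference is nonzero for a particular value depends on the platform's math library.

import numpy as np

x = 5 - 1e-15                            # the value suggested in the norm() comment above
print(x * x - x ** 2)                    # may be nonzero (on the order of 1e-15) on some systems
print(primapow2(np.array([1.5, 2.5])))   # array([2.25, 6.25])
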
+ def planerot(x):
+     """
+     As in MATLAB, planerot(x) returns a 2x2 Givens matrix G for x in R2 so that Y = G@x has Y[1] = 0.
+     Roughly speaking, G = np.array([[x[0]/R, x[1]/R], [-x[1]/R, x[0]/R]]), where R = np.linalg.norm(x).
+     0. We need to take care of the possibilities of R = 0, Inf, NaN, and over/underflow.
+     1. The G defined above is continuous with respect to X except at 0. Following this definition,
+        G = np.array([[np.sign(x[0]), 0], [0, np.sign(x[0])]]) if x[1] == 0,
+        G = np.array([[0, np.sign(x[1])], [np.sign(x[1]), 0]]) if x[0] == 0.
+        Yet some implementations ignore the signs, leading to discontinuity and numerical instability.
+     2. Difference from MATLAB: if x contains NaN or consists of only Inf, MATLAB returns a NaN matrix,
+        but we return an identity matrix or a matrix of +/-np.sqrt(2). We intend to keep G always orthogonal.
+     """
+
+     # Preconditions
+     if DEBUGGING:
+         assert len(x) == 2, "x must be a 2-vector"
+
+     # ====================
+     # Calculation starts
+     # ====================
+
+     # Define C = X(1)/R and S = X(2)/R with R = HYPOT(X(1), X(2)). Handle Inf/NaN and over/underflow.
+     if any(np.isnan(x)):
+         # In this case, MATLAB sets G to NaN(2, 2). We refrain from doing so to keep G orthogonal.
+         c = 1
+         s = 0
+     elif all(np.isinf(x)):
+         # In this case, MATLAB sets G to NaN(2, 2). We refrain from doing so to keep G orthogonal.
+         c = 1 / np.sqrt(2) * np.sign(x[0])
+         s = 1 / np.sqrt(2) * np.sign(x[1])
+     elif abs(x[0]) <= 0 and abs(x[1]) <= 0:  # X(1) == 0 == X(2).
+         c = 1
+         s = 0
+     elif abs(x[1]) <= EPS * abs(x[0]):
+         # N.B.:
+         # 0. With <= instead of <, this case covers X(1) == 0 == X(2), which is treated above separately
+         #    to avoid the confusing SIGN(., 0) (see 1).
+         # 1. SIGN(A, 0) = ABS(A) in Fortran, but sign(0) = 0 in MATLAB, Python, Julia, and R.
+         # 2. Taking SIGN(X(1)) into account ensures the continuity of G with respect to X except at 0.
+         c = np.sign(x[0])
+         s = 0
+     elif abs(x[0]) <= EPS * abs(x[1]):
+         # N.B.: SIGN(A, X) = ABS(A) * sign of X /= A * sign of X. Therefore, it is WRONG to define G
+         # as SIGN(RESHAPE([ZERO, -ONE, ONE, ZERO], [2, 2]), X(2)). This mistake was committed on
+         # 20211206 and took a whole day to debug! NEVER use SIGN on arrays unless you are really sure.
+         c = 0
+         s = np.sign(x[1])
+     else:
+         # Here is the normal case. It implements the Givens rotation in a stable & continuous way as in:
+         # Bindel, D., Demmel, J., Kahan, W., and Marques, O. (2002). On computing Givens rotations
+         # reliably and efficiently. ACM Transactions on Mathematical Software (TOMS), 28(2), 206-238.
+         # N.B.: 1. Modern compilers compute SQRT(REALMIN) and SQRT(REALMAX/2.1) at compilation time.
+         # 2. The direct calculation without involving T and U seems to work better; use it if possible.
+         if all(np.logical_and(np.sqrt(REALMIN) < np.abs(x), np.abs(x) < np.sqrt(REALMAX / 2.1))):
+             # Do NOT use HYPOTENUSE here; the best implementation for one may be suboptimal for the other.
+             r = norm(x)
+             c = x[0] / r
+             s = x[1] / r
+         elif abs(x[0]) > abs(x[1]):
+             t = x[1] / x[0]
+             u = max(1, abs(t), np.sqrt(1 + t * t))  # MAXVAL: precaution against rounding error.
+             u *= np.sign(x[0])  # MATLAB: u = sign(x(1))*sqrt(1 + t**2)
+             c = 1 / u
+             s = t / u
+         else:
+             t = x[0] / x[1]
+             u = max(1, abs(t), np.sqrt(1 + t * t))  # MAXVAL: precaution against rounding error.
+             u *= np.sign(x[1])  # MATLAB: u = sign(x(2))*sqrt(1 + t**2)
+             c = t / u
+             s = 1 / u
+
+     G = np.array([[c, s], [-s, c]])  # MATLAB: G = [c, s; -s, c]
+
+     # ====================
+     # Calculation ends
+     # ====================
+
+     # Postconditions
+     if DEBUGGING:
+         assert G.shape == (2, 2)
+         assert np.all(np.isfinite(G))
+         assert abs(G[0, 0] - G[1, 1]) + abs(G[0, 1] + G[1, 0]) <= 0
+         tol = np.maximum(1.0e-10, np.minimum(1.0e-1, 1.0e6 * EPS))
+         assert isorth(G, tol)
+         if all(np.logical_and(np.isfinite(x), np.abs(x) < np.sqrt(REALMAX / 2.1))):
+             r = np.linalg.norm(x)
+             assert max(abs(G @ x - [r, 0])) <= max(tol, tol * r), "G @ X = [||X||, 0]"
+
+     return G
+
+
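Illustrative sketch, not part of the packaged file and assuming the definitions above are in scope: for a finite nonzero 2-vector, the returned G rotates x onto the first axis, so G @ x comes out as [||x||, 0] up to round-off, which is what the postcondition asserts.

import numpy as np

x = np.array([3.0, 4.0])
G = planerot(x)
print(G)       # approximately [[ 0.6,  0.8], [-0.8,  0.6]]
print(G @ x)   # approximately [5., 0.]
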
+ def isminor(x, ref):
+     """
+     This function tests whether x is minor compared to ref. It is used by Powell, e.g., in COBYLA.
+     In precise arithmetic, isminor(x, ref) is true if and only if x == 0; in floating point
+     arithmetic, isminor(x, ref) is true if x is 0 or its nonzero value can be attributed to
+     computer rounding errors according to ref.
+     Larger sensitivity means the function is more strict/precise, the value 0.1 being due to Powell.
+
+     For example:
+     isminor(1e-20, 1e300) -> True, because in floating point arithmetic 1e-20 cannot be added to
+     1e300 without being rounded to 1e300.
+     isminor(1e300, 1e-20) -> False, because in floating point arithmetic adding 1e300 to 1e-20
+     dominates the latter number.
+     isminor(3, 4) -> False, because 3 can be added to 4 without being rounded off.
+     """
+
+     sensitivity = 0.1
+     refa = abs(ref) + sensitivity * abs(x)
+     refb = abs(ref) + 2 * sensitivity * abs(x)
+     return np.logical_or(abs(ref) >= refa, refa >= refb)
+
+
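Illustrative sketch, not part of the packaged file and assuming the definitions above are in scope: the docstring's examples written out as calls.

print(isminor(1e-20, 1e300))   # True:  1e-20 is rounded away when added to 1e300
print(isminor(1e300, 1e-20))   # False: 1e300 dominates 1e-20
print(isminor(3, 4))           # False: 3 is not negligible next to 4
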
+ def isinv(A, B, tol=None):
+     """
+     This procedure tests whether A = B^{-1} up to the tolerance TOL.
+     """
+
+     # Sizes
+     n = np.size(A, 0)
+
+     # Preconditions
+     if DEBUGGING:
+         assert np.size(A, 0) == np.size(A, 1)
+         assert np.size(B, 0) == np.size(B, 1)
+         assert np.size(A, 0) == np.size(B, 0)
+         if present(tol):
+             assert tol >= 0
+
+     # ====================
+     # Calculation starts
+     # ====================
+
+     tol = (
+         tol
+         if present(tol)
+         else np.minimum(1e-3, 1e2 * EPS * np.maximum(np.size(A, 0), np.size(A, 1)))
+     )
+     tol = np.max([tol, tol * np.max(abs(A)), tol * np.max(abs(B))])
+     is_inv = (abs(matprod(A, B) - np.eye(n)) <= tol).all() or (
+         abs(matprod(B, A) - np.eye(n)) <= tol
+     ).all()
+
+     # ====================
+     # Calculation ends
+     # ====================
+     return is_inv
+
+
+ def isorth(A, tol=None):
+     """
+     This function tests whether the matrix A has orthonormal columns up to the tolerance TOL.
+     """
+
+     # Preconditions
+     if DEBUGGING:
+         if present(tol):
+             assert tol >= 0
+
+     # ====================
+     # Calculation starts
+     # ====================
+
+     num_vars = np.size(A, 1)
+
+     if num_vars > np.size(A, 0):
+         is_orth = False
+     elif np.isnan(primasum(abs(A))):
+         is_orth = False
+     else:
+         if present(tol):
+             is_orth = (
+                 abs(matprod(A.T, A) - np.eye(num_vars))
+                 <= np.maximum(tol, tol * np.max(abs(A)))
+             ).all()
+         else:
+             is_orth = (abs(matprod(A.T, A) - np.eye(num_vars)) <= 0).all()
+
+     # ====================
+     # Calculation ends
+     # ====================
+     return is_orth
+
+
+ def get_arrays_tol(*arrays):
+     """
+     Get a relative tolerance for a set of arrays. Borrowed from COBYQA.
+
+     Parameters
+     ----------
+     *arrays: tuple
+         Set of `numpy.ndarray` to get the tolerance for.
+
+     Returns
+     -------
+     float
+         Relative tolerance for the set of arrays.
+
+     Raises
+     ------
+     ValueError
+         If no array is provided.
+     """
+     if len(arrays) == 0:
+         raise ValueError("At least one array must be provided.")
+     size = max(array.size for array in arrays)
+     weight = max(
+         np.max(np.abs(array[np.isfinite(array)]), initial=1.0) for array in arrays
+     )
+     return 10.0 * EPS * max(size, 1.0) * weight
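
Illustrative sketch, not part of the packaged file and assuming the definitions above are in scope: the tolerance scales with the largest array size and the largest finite magnitude across the inputs, with non-finite entries ignored.

import numpy as np

a = np.array([1.0, -2.0, np.inf])
b = np.array([[3.0, 4.0]])
tol = get_arrays_tol(a, b)   # 10.0 * EPS * 3 * 4.0: max size is 3, max finite magnitude is 4.0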