Trajectree 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. trajectree/__init__.py +0 -3
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +3 -3
  5. trajectree/fock_optics/utils.py +6 -6
  6. trajectree/trajectory.py +2 -2
  7. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/METADATA +2 -3
  8. trajectree-0.0.2.dist-info/RECORD +16 -0
  9. trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
  10. trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
  11. trajectree/quimb/docs/conf.py +0 -158
  12. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
  13. trajectree/quimb/quimb/__init__.py +0 -507
  14. trajectree/quimb/quimb/calc.py +0 -1491
  15. trajectree/quimb/quimb/core.py +0 -2279
  16. trajectree/quimb/quimb/evo.py +0 -712
  17. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  18. trajectree/quimb/quimb/experimental/autojittn.py +0 -129
  19. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
  20. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
  21. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
  22. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
  23. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
  24. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
  25. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
  26. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
  27. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
  28. trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
  29. trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
  30. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
  31. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
  32. trajectree/quimb/quimb/experimental/schematic.py +0 -7
  33. trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
  34. trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
  35. trajectree/quimb/quimb/gates.py +0 -36
  36. trajectree/quimb/quimb/gen/__init__.py +0 -2
  37. trajectree/quimb/quimb/gen/operators.py +0 -1167
  38. trajectree/quimb/quimb/gen/rand.py +0 -713
  39. trajectree/quimb/quimb/gen/states.py +0 -479
  40. trajectree/quimb/quimb/linalg/__init__.py +0 -6
  41. trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
  42. trajectree/quimb/quimb/linalg/autoblock.py +0 -258
  43. trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
  44. trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
  45. trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
  46. trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
  47. trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
  48. trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
  49. trajectree/quimb/quimb/schematic.py +0 -1518
  50. trajectree/quimb/quimb/tensor/__init__.py +0 -401
  51. trajectree/quimb/quimb/tensor/array_ops.py +0 -610
  52. trajectree/quimb/quimb/tensor/circuit.py +0 -4824
  53. trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
  54. trajectree/quimb/quimb/tensor/contraction.py +0 -336
  55. trajectree/quimb/quimb/tensor/decomp.py +0 -1255
  56. trajectree/quimb/quimb/tensor/drawing.py +0 -1646
  57. trajectree/quimb/quimb/tensor/fitting.py +0 -385
  58. trajectree/quimb/quimb/tensor/geometry.py +0 -583
  59. trajectree/quimb/quimb/tensor/interface.py +0 -114
  60. trajectree/quimb/quimb/tensor/networking.py +0 -1058
  61. trajectree/quimb/quimb/tensor/optimize.py +0 -1818
  62. trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
  63. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
  64. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
  65. trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
  66. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
  67. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
  68. trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
  69. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
  70. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
  71. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
  72. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
  73. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
  74. trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
  75. trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
  76. trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
  77. trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
  78. trajectree/quimb/quimb/utils.py +0 -892
  79. trajectree/quimb/tests/__init__.py +0 -0
  80. trajectree/quimb/tests/test_accel.py +0 -501
  81. trajectree/quimb/tests/test_calc.py +0 -788
  82. trajectree/quimb/tests/test_core.py +0 -847
  83. trajectree/quimb/tests/test_evo.py +0 -565
  84. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  85. trajectree/quimb/tests/test_gen/test_operators.py +0 -361
  86. trajectree/quimb/tests/test_gen/test_rand.py +0 -296
  87. trajectree/quimb/tests/test_gen/test_states.py +0 -261
  88. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  89. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
  90. trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
  91. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
  92. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
  93. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
  94. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
  95. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  96. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  97. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
  100. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
  101. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
  102. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
  103. trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
  104. trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
  105. trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
  106. trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
  107. trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
  108. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
  109. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
  110. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
  111. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
  112. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
  113. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
  114. trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
  115. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
  116. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
  117. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
  118. trajectree/quimb/tests/test_utils.py +0 -85
  119. trajectree-0.0.1.dist-info/RECORD +0 -126
  120. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/WHEEL +0 -0
  121. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/licenses/LICENSE +0 -0
  122. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/top_level.txt +0 -0
@@ -1,1255 +0,0 @@
1
- """Functions for decomposing and projecting matrices.
2
- """
3
-
4
- import functools
5
- import operator
6
- import warnings
7
-
8
- import numpy as np
9
- import scipy.linalg as scla
10
- import scipy.linalg.interpolative as sli
11
- import scipy.sparse.linalg as spla
12
- from autoray import (
13
- astype,
14
- backend_like,
15
- compose,
16
- dag,
17
- do,
18
- get_dtype_name,
19
- get_lib_fn,
20
- infer_backend,
21
- lazy,
22
- reshape,
23
- )
24
-
25
- from ..core import njit
26
- from ..linalg import base_linalg, rand_linalg
27
-
28
- _CUTOFF_MODE_MAP = {
29
- "abs": 1,
30
- "rel": 2,
31
- "sum2": 3,
32
- "rsum2": 4,
33
- "sum1": 5,
34
- "rsum1": 6,
35
- }
36
-
37
-
38
def map_cutoff_mode(cutoff_mode):
    """Translate a named cutoff mode into its integer code for numba.

    Unknown (e.g. already-integer) modes are passed through unchanged.
    """
    if cutoff_mode in _CUTOFF_MODE_MAP:
        return _CUTOFF_MODE_MAP[cutoff_mode]
    return cutoff_mode
41
-
42
-
43
- # some convenience functions for multiplying diagonals
44
-
45
-
46
def rdmul(x, d):
    """Right-multiply matrix ``x`` by the diagonal represented by vector
    ``d``.
    """
    row = reshape(d, (1, -1))
    return x * row
49
-
50
-
51
def rddiv(x, d):
    """Right-multiply matrix ``x`` by the inverse of the diagonal
    represented by vector ``d``.
    """
    row = reshape(d, (1, -1))
    return x / row
56
-
57
-
58
def ldmul(d, x):
    """Left-multiply matrix ``x`` by the diagonal represented by vector
    ``d``.
    """
    col = reshape(d, (-1, 1))
    return x * col
61
-
62
-
63
def lddiv(d, x):
    """Left-multiply matrix ``x`` by the inverse of the diagonal
    represented by vector ``d``.
    """
    col = reshape(d, (-1, 1))
    return x / col
68
-
69
-
70
@njit  # pragma: no cover
def dag_numba(x):
    """Conjugate transpose of ``x`` (numba-compiled)."""
    return np.conj(x.T)
73
-
74
-
75
@njit  # pragma: no cover
def rdmul_numba(x, d):
    """Right-multiply by a diagonal (numba-compiled)."""
    row = d.reshape(1, -1)
    return x * row
78
-
79
-
80
@njit  # pragma: no cover
def rddiv_numba(x, d):
    """Right-divide by a diagonal (numba-compiled)."""
    row = d.reshape(1, -1)
    return x / row
83
-
84
-
85
@njit  # pragma: no cover
def ldmul_numba(d, x):
    """Left-multiply by a diagonal (numba-compiled)."""
    col = d.reshape(-1, 1)
    return x * col
88
-
89
-
90
@njit  # pragma: no cover
def lddiv_numba(d, x):
    """Left-divide by a diagonal (numba-compiled)."""
    col = d.reshape(-1, 1)
    return x / col
93
-
94
-
95
@compose
def sgn(x):
    """Get the 'sign' of ``x``, such that ``x / sgn(x)`` is real and
    non-negative.
    """
    # adding the zero-mask makes sgn(0) == 1 rather than nan
    zero = x == 0.0
    return (x + zero) / (do("abs", x) + zero)
102
-
103
-
104
- @sgn.register("numpy")
105
- @njit # pragma: no cover
106
- def sgn_numba(x):
107
- x0 = x == 0.0
108
- return (x + x0) / (np.abs(x) + x0)
109
-
110
-
111
- @sgn.register("tensorflow")
112
- def sgn_tf(x):
113
- with backend_like(x):
114
- x0 = do("cast", do("equal", x, 0.0), x.dtype)
115
- xa = do("cast", do("abs", x), x.dtype)
116
- return (x + x0) / (xa + x0)
117
-
118
-
119
def _trim_and_renorm_svd_result(
    U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
):
    """Given a full SVD decomposition result ``U``, ``s``, ``VH``, optionally
    trim, renormalize, and absorb the singular values. See ``svd_truncated``
    for details of the parameters.
    """
    if (cutoff > 0.0) or (renorm > 0):
        # compute ``chi``, the number of singular values to keep
        if cutoff_mode == 1:  # 'abs'
            chi = do("count_nonzero", s > cutoff)

        elif cutoff_mode == 2:  # 'rel'
            chi = do("count_nonzero", s > cutoff * s[0])

        elif cutoff_mode in (3, 4, 5, 6):
            # modes based on the cumulative sum of s**1 or s**2
            if cutoff_mode in (3, 4):
                power = 2
            else:
                power = 1

            spow = s**power
            cumspow = do("cumsum", spow, 0)
            total = cumspow[-1]

            if cutoff_mode in (4, 6):
                # relative: keep while cumulative weight below (1 - cutoff)
                chi = do("count_nonzero", cumspow < (1 - cutoff) * total) + 1
            else:
                # absolute: keep while discarded weight exceeds cutoff
                chi = do("count_nonzero", (total - cumspow) > cutoff) + 1

        chi = max(chi, 1)
        if max_bond > 0:
            chi = min(chi, max_bond)

    elif max_bond > 0:
        # only maximum bond specified
        chi = max_bond
    else:
        # neither maximum bond dimension nor cutoff specified
        chi = s.shape[0]

    if chi < s.shape[0]:
        s = s[:chi]
        U = U[:, :chi]
        VH = VH[:chi, :]

        if renorm > 0:
            # NOTE(review): relies on cumspow/total/power set in the sum
            # modes above — renorm with cutoff_mode 1/2 would NameError
            norm = (total / cumspow[chi - 1]) ** (1 / power)
            s *= norm

    # XXX: tensorflow can't multiply mixed dtypes
    if infer_backend(s) == "tensorflow":
        dtype = get_dtype_name(U)
        if "complex" in dtype:
            s = astype(s, dtype)

    if absorb is None:
        return U, s, VH
    if absorb == -1:
        U = rdmul(U, s)
    elif absorb == 1:
        VH = ldmul(s, VH)
    else:
        # absorb the square root of s into both factors
        s = do("sqrt", s)
        U = rdmul(U, s)
        VH = ldmul(s, VH)

    return U, None, VH
186
-
187
-
188
@compose
def svd_truncated(
    x,
    cutoff=-1.0,
    cutoff_mode=4,
    max_bond=-1,
    absorb=0,
    renorm=0,
    backend=None,
):
    """Truncated svd or raw array ``x``.

    Parameters
    ----------
    cutoff : float, optional
        Singular value cutoff threshold, if ``cutoff <= 0.0``, then only
        ``max_bond`` is used.
    cutoff_mode : {1, 2, 3, 4, 5, 6}, optional
        How to perform the trim:

        - 1: ['abs'], trim values below ``cutoff``
        - 2: ['rel'], trim values below ``s[0] * cutoff``
        - 3: ['sum2'], trim s.t. ``sum(s_trim**2) < cutoff``.
        - 4: ['rsum2'], trim s.t. ``sum(s_trim**2) < sum(s**2) * cutoff``.
        - 5: ['sum1'], trim s.t. ``sum(s_trim**1) < cutoff``.
        - 6: ['rsum1'], trim s.t. ``sum(s_trim**1) < sum(s**1) * cutoff``.

    max_bond : int, optional
        An explicit maximum bond dimension, use -1 for none.
    absorb : {-1, 0, 1, None}, optional
        How to absorb the singular values. -1: left, 0: both, 1: right and
        None: don't absorb (return).
    renorm : {0, 1}, optional
        Whether to renormalize the singular values (depends on
        `cutoff_mode`).
    """
    with backend_like(backend):
        # full decomposition first, then trim/absorb as requested
        U, s, VH = do("linalg.svd", x)
        return _trim_and_renorm_svd_result(
            U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
        )
228
-
229
-
230
- @njit(["i4(f4[:], f4, i4)", "i4(f8[:], f8, i4)"]) # pragma: no cover
231
- def _compute_number_svals_to_keep_numba(s, cutoff, cutoff_mode):
232
- """Find the number of singular values to keep of ``s`` given ``cutoff`` and
233
- ``cutoff_mode``.
234
- """
235
- if cutoff_mode == 1: # 'abs'
236
- n_chi = np.sum(s > cutoff)
237
-
238
- elif cutoff_mode == 2: # 'rel'
239
- n_chi = np.sum(s > cutoff * s[0])
240
-
241
- elif cutoff_mode in (3, 4, 5, 6):
242
- if cutoff_mode in (3, 4):
243
- pow = 2
244
- else:
245
- pow = 1
246
-
247
- target = cutoff
248
- if cutoff_mode in (4, 6):
249
- target *= np.sum(s**pow)
250
-
251
- n_chi = s.size
252
- ssum = 0.0
253
- for i in range(s.size - 1, -1, -1):
254
- s2 = s[i] ** pow
255
- if not np.isnan(s2):
256
- ssum += s2
257
- if ssum > target:
258
- break
259
- n_chi -= 1
260
-
261
- return max(n_chi, 1)
262
-
263
-
264
- @njit(["f4(f4[:], i4, f4)", "f8(f8[:], i4, f8)"]) # pragma: no cover
265
- def _compute_svals_renorm_factor_numba(s, n_chi, renorm):
266
- """Find the normalization constant for ``s`` such that the new sum squared
267
- of the ``n_chi`` largest values equals the sum squared of all the old ones.
268
- """
269
- s_tot_keep = 0.0
270
- s_tot_lose = 0.0
271
- for i in range(s.size):
272
- s2 = s[i] ** renorm
273
- if not np.isnan(s2):
274
- if i < n_chi:
275
- s_tot_keep += s2
276
- else:
277
- s_tot_lose += s2
278
- return ((s_tot_keep + s_tot_lose) / s_tot_keep) ** (1 / renorm)
279
-
280
-
281
@njit  # pragma: no cover
def _trim_and_renorm_svd_result_numba(
    U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
):
    """Accelerated version of ``_trim_and_renorm_svd_result``."""
    if (cutoff > 0.0) or (renorm > 0):
        chi = _compute_number_svals_to_keep_numba(s, cutoff, cutoff_mode)

        if max_bond > 0:
            chi = min(chi, max_bond)

        if chi < s.size:
            if renorm > 0:
                factor = _compute_svals_renorm_factor_numba(s, chi, renorm)
                s = s[:chi] * factor
            else:
                s = s[:chi]

            U = U[:, :chi]
            VH = VH[:chi, :]

    elif (max_bond != -1) and (max_bond < s.shape[0]):
        # only a maximum bond dimension was supplied
        U = U[:, :max_bond]
        s = s[:max_bond]
        VH = VH[:max_bond, :]

    # slicing can leave a non-contiguous array, which numba kernels dislike
    s = np.ascontiguousarray(s)

    if absorb is None:
        return U, s, VH
    elif absorb == -1:
        U = rdmul_numba(U, s)
    elif absorb == 1:
        VH = ldmul_numba(s, VH)
    else:
        # absorb the square root of s into both factors
        s **= 0.5
        U = rdmul_numba(U, s)
        VH = ldmul_numba(s, VH)

    return U, None, VH
322
-
323
-
324
@njit  # pragma: no cover
def svd_truncated_numba(
    x, cutoff=-1.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0
):
    """Accelerated version of ``svd_truncated`` for numpy arrays."""
    # economic SVD then shared trim/absorb logic
    U, s, VH = np.linalg.svd(x, full_matrices=False)
    return _trim_and_renorm_svd_result_numba(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
333
-
334
-
335
- @svd_truncated.register("numpy")
336
- def svd_truncated_numpy(
337
- x, cutoff=-1.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0
338
- ):
339
- """Numpy version of ``svd_truncated``, trying the accelerated version
340
- first, then falling back to the more stable scipy version.
341
- """
342
- try:
343
- return svd_truncated_numba(
344
- x, cutoff, cutoff_mode, max_bond, absorb, renorm
345
- )
346
- except ValueError as e: # pragma: no cover
347
- warnings.warn(f"Got: {e}, falling back to scipy gesvd driver.")
348
- U, s, VH = scla.svd(x, full_matrices=False, lapack_driver="gesvd")
349
- return _trim_and_renorm_svd_result_numba(
350
- U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
351
- )
352
-
353
-
354
- @svd_truncated.register("autoray.lazy")
355
- @lazy.core.lazy_cache("svd_truncated")
356
- def svd_truncated_lazy(
357
- x,
358
- cutoff=-1.0,
359
- cutoff_mode=4,
360
- max_bond=-1,
361
- absorb=0,
362
- renorm=0,
363
- ):
364
- if cutoff != 0.0:
365
- raise ValueError("Can't handle dynamic cutoffs in lazy mode.")
366
-
367
- m, n = x.shape
368
- k = min(m, n)
369
- if max_bond > 0:
370
- k = min(k, max_bond)
371
-
372
- lsvdt = x.to(
373
- fn=get_lib_fn(x.backend, "svd_truncated"),
374
- args=(x, cutoff, cutoff_mode, max_bond, absorb, renorm),
375
- shape=(3,),
376
- )
377
-
378
- U = lsvdt.to(operator.getitem, (lsvdt, 0), shape=(m, k))
379
- if absorb is None:
380
- s = lsvdt.to(operator.getitem, (lsvdt, 1), shape=(k,))
381
- else:
382
- s = None
383
- VH = lsvdt.to(operator.getitem, (lsvdt, 2), shape=(k, n))
384
-
385
- return U, s, VH
386
-
387
-
388
@compose
def lu_truncated(
    x,
    cutoff=-1.0,
    cutoff_mode=4,
    max_bond=-1,
    absorb=0,
    renorm=0,
    backend=None,
):
    """Truncated 'split' of ``x`` via LU decomposition, ``x ~ PL @ U``, where
    rows of ``U`` and columns of ``PL`` are discarded when both have small
    weight. Only absolute (1, 'abs') and relative (2, 'rel') cutoff modes are
    supported, and ``max_bond``, ``absorb`` and ``renorm`` must be left at
    their defaults.

    Returns
    -------
    (PL, None, U)

    Raises
    ------
    NotImplementedError
        If an unsupported ``absorb``, ``renorm``, ``max_bond`` or
        ``cutoff_mode`` is requested.
    """
    if absorb != 0:
        # fixed: message previously read "absorb{absorb}" (missing '='),
        # inconsistent with the renorm/max_bond messages below
        raise NotImplementedError(
            f"Can't handle absorb={absorb} in lu_truncated."
        )
    elif renorm != 0:
        raise NotImplementedError(
            f"Can't handle renorm={renorm} in lu_truncated."
        )
    elif max_bond != -1:
        # use argsort(sl * su) to handle this?
        raise NotImplementedError(
            f"Can't handle max_bond={max_bond} in lu_truncated."
        )

    with backend_like(backend):
        PL, U = do("scipy.linalg.lu", x, permute_l=True)

        # weight of each internal index in the left and right factors
        sl = do("sum", do("abs", PL), axis=0)
        su = do("sum", do("abs", U), axis=1)

        if cutoff_mode == 2:
            abs_cutoff_l = cutoff * do("max", sl)
            abs_cutoff_u = cutoff * do("max", su)
        elif cutoff_mode == 1:
            abs_cutoff_l = abs_cutoff_u = cutoff
        else:
            raise NotImplementedError(
                f"Can't handle cutoff_mode={cutoff_mode} in lu_truncated."
            )

        # keep an index only if it carries weight on *both* sides
        idx = (sl > abs_cutoff_l) & (su > abs_cutoff_u)

        PL = PL[:, idx]
        U = U[idx, :]

    return PL, None, U
434
-
435
-
436
def svdvals(x):
    """SVD-decomposition, but return singular values only."""
    return np.linalg.svd(x, compute_uv=False, full_matrices=False)
439
-
440
-
441
@njit  # pragma: no cover
def _svd_via_eig_truncated_numba(
    x, cutoff=-1.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0
):
    """SVD-split via eigen-decomposition (numba version)."""
    if x.shape[0] > x.shape[1]:
        # tall: eigendecompose x†x to get V, then U = (x @ V) / s
        s2, V = np.linalg.eigh(dag_numba(x) @ x)
        U = x @ V
        VH = dag_numba(V)
        # small negative eigenvalues turn into nan when sqrtd
        s2[s2 < 0.0] = 0.0
        s = np.sqrt(s2)
        U = rddiv_numba(U, s)
    else:
        # wide: eigendecompose x x† to get U, then VH = (U† @ x) / s
        s2, U = np.linalg.eigh(x @ dag_numba(x))
        VH = dag_numba(U) @ x
        s2[s2 < 0.0] = 0.0
        s = np.sqrt(s2)
        VH = lddiv_numba(s, VH)

    # eigh returns ascending order -> flip to descending
    U, s, VH = U[:, ::-1], s[::-1], VH[::-1, :]

    return _trim_and_renorm_svd_result_numba(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
469
-
470
-
471
def svd_via_eig_truncated(
    x, cutoff=-1.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0
):
    """SVD-split via eigen-decomposition of the gram matrix, dispatching to
    the numba version for raw numpy arrays.
    """
    if isinstance(x, np.ndarray):
        return _svd_via_eig_truncated_numba(
            x, cutoff, cutoff_mode, max_bond, absorb, renorm
        )

    if x.shape[0] > x.shape[1]:
        # tall: eigendecompose x†x to get V, then U = (x @ V) / s
        s2, V = do("linalg.eigh", dag(x) @ x)
        U = x @ V
        VH = dag(V)
        # small negative eigenvalues turn into nan when sqrtd
        s2 = do("clip", s2, 0.0, None)
        s = do("sqrt", s2)
        U = rddiv(U, s)
    else:
        # wide: eigendecompose x x† to get U, then VH = (U† @ x) / s
        s2, U = do("linalg.eigh", x @ dag(x))
        VH = dag(U) @ x
        s2 = do("clip", s2, 0.0, None)
        s = do("sqrt", s2)
        VH = lddiv(s, VH)

    # eigh returns ascending order -> flip to descending
    U, s, VH = do("flip", U, (1,)), do("flip", s, (0,)), do("flip", VH, (0,))

    return _trim_and_renorm_svd_result(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
502
-
503
-
504
@njit  # pragma: no cover
def svdvals_eig(x):  # pragma: no cover
    """SVD-decomposition via eigen, but return singular values only."""
    # form the smaller of the two gram matrices
    if x.shape[0] > x.shape[1]:
        gram = dag_numba(x) @ x
    else:
        gram = x @ dag_numba(x)

    s2 = np.linalg.eigvalsh(gram)
    # clip tiny negative eigenvalues, which would go nan when rooted
    s2[s2 < 0.0] = 0.0
    # ascending eigenvalues -> descending singular values
    return s2[::-1] ** 0.5
514
-
515
-
516
@compose
def eigh_truncated(
    x,
    cutoff=-1.0,
    cutoff_mode=4,
    max_bond=-1,
    absorb=0,
    renorm=0,
    backend=None,
):
    """SVD-like truncated split using a hermitian eigen-decomposition — only
    valid if ``x`` is hermitian.
    """
    with backend_like(backend):
        s, U = do("linalg.eigh", x)

        # order by descending magnitude so the largest value comes first
        order = do("argsort", -do("abs", s))
        s, U = s[order], U[:, order]

        # push the eigenvalue phases into V, leaving non-negative s
        V = ldmul(sgn(s), dag(U))
        s = do("abs", s)
        return _trim_and_renorm_svd_result(
            U, s, V, cutoff, cutoff_mode, max_bond, absorb, renorm
        )
539
-
540
-
541
- @eigh_truncated.register("numpy")
542
- @njit # pragma: no cover
543
- def eigh_truncated_numba(
544
- x, cutoff=-1.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0
545
- ):
546
- """SVD-decomposition, using hermitian eigen-decomposition, only works if
547
- ``x`` is hermitian.
548
- """
549
- s, U = np.linalg.eigh(x)
550
-
551
- # make sure largest singular value first
552
- k = np.argsort(-np.abs(s))
553
- s, U = s[k], U[:, k]
554
-
555
- # absorb phase into V
556
- V = ldmul_numba(sgn_numba(s), dag_numba(U))
557
- s = np.abs(s)
558
- return _trim_and_renorm_svd_result_numba(
559
- U, s, V, cutoff, cutoff_mode, max_bond, absorb, renorm
560
- )
561
-
562
-
563
def _choose_k(x, cutoff, max_bond):
    """Choose the number of singular values to target, returning ``"full"``
    when a dense decomposition would be cheaper.
    """
    d = min(x.shape)

    if cutoff != 0.0:
        k_max = None if max_bond < 0 else max_bond
        k = rand_linalg.estimate_rank(x, cutoff, k_max=k_max)
    else:
        k = min(d, max_bond)

    # if computing more than half of spectrum then just use dense method
    if k > d // 2:
        return "full"
    return k
576
-
577
-
578
def svds(x, cutoff=0.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0):
    """SVD-decomposition using iterative methods. Allows the
    computation of only a certain number of singular values, e.g. max_bond,
    from the get-go, and is thus more efficient. Can also supply
    ``scipy.sparse.linalg.LinearOperator``.
    """
    k = _choose_k(x, cutoff, max_bond)

    if k == "full":
        # cheaper to go dense
        if not isinstance(x, np.ndarray):
            x = x.to_dense()
        return svd_truncated(x, cutoff, cutoff_mode, max_bond, absorb)

    U, s, VH = base_linalg.svds(x, k=k)
    return _trim_and_renorm_svd_result_numba(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
595
-
596
-
597
def isvd(x, cutoff=0.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0):
    """SVD-decomposition using interpolative matrix random methods. Allows the
    computation of only a certain number of singular values, e.g. max_bond,
    from the get-go, and is thus more efficient. Can also supply
    ``scipy.sparse.linalg.LinearOperator``.
    """
    k = _choose_k(x, cutoff, max_bond)

    if k == "full":
        # cheaper to go dense
        if not isinstance(x, np.ndarray):
            x = x.to_dense()
        return svd_truncated(x, cutoff, cutoff_mode, max_bond, absorb)

    # interpolative SVD returns V, not VH
    U, s, V = sli.svd(x, k)
    VH = dag_numba(V)
    return _trim_and_renorm_svd_result_numba(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
615
-
616
-
617
def _rsvd_numpy(x, cutoff=0.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0):
    """Randomized SVD for numpy arrays / scipy linear operators."""
    if max_bond > 0:
        if cutoff > 0.0:
            # adapt and block
            U, s, VH = rand_linalg.rsvd(x, cutoff, k_max=max_bond)
        else:
            U, s, VH = rand_linalg.rsvd(x, max_bond)
    else:
        U, s, VH = rand_linalg.rsvd(x, cutoff)

    return _trim_and_renorm_svd_result_numba(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
630
-
631
-
632
def rsvd(x, cutoff=0.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0):
    """SVD-decomposition using randomized methods (due to Halko). Allows the
    computation of only a certain number of singular values, e.g. max_bond,
    from the get-go, and is thus more efficient. Can also supply
    ``scipy.sparse.linalg.LinearOperator``.
    """
    if isinstance(x, (np.ndarray, spla.LinearOperator)):
        return _rsvd_numpy(x, cutoff, cutoff_mode, max_bond, absorb, renorm)

    # other backends supply their own randomized svd
    U, s, VH = do("linalg.rsvd", x, max_bond)
    return _trim_and_renorm_svd_result(
        U, s, VH, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
645
-
646
-
647
def eigsh(x, cutoff=0.0, cutoff_mode=4, max_bond=-1, absorb=0, renorm=0):
    """SVD-decomposition using iterative hermitian eigen decomp, thus assuming
    that ``x`` is hermitian. Allows the computation of only a certain number of
    singular values, e.g. max_bond, from the get-go, and is thus more
    efficient. Can also supply ``scipy.sparse.linalg.LinearOperator``.
    """
    k = _choose_k(x, cutoff, max_bond)

    if k == "full":
        # cheaper to go dense
        if not isinstance(x, np.ndarray):
            x = x.to_dense()
        return eigh_truncated(x, cutoff, cutoff_mode, max_bond, absorb)

    s, U = base_linalg.eigh(x, k=k)
    # make sure largest magnitude value comes first
    s, U = s[::-1], U[:, ::-1]
    # push phases into V, leaving non-negative singular values
    V = ldmul_numba(sgn(s), dag_numba(U))
    s = np.abs(s)
    return _trim_and_renorm_svd_result_numba(
        U, s, V, cutoff, cutoff_mode, max_bond, absorb, renorm
    )
667
-
668
-
669
@compose
def qr_stabilized(x, backend=None):
    """QR-decomposition, with stabilized R factor."""
    with backend_like(backend):
        Q, R = do("linalg.qr", x)
        # fix the gauge: rotate so the diagonal of R is real non-negative
        phases = sgn(do("diag", R))
        Q = rdmul(Q, do("conj", phases))
        R = ldmul(phases, R)
        return Q, None, R
680
-
681
-
682
- @qr_stabilized.register("numpy")
683
- @njit # pragma: no cover
684
- def qr_stabilized_numba(x):
685
- Q, R = np.linalg.qr(x)
686
- for i in range(R.shape[0]):
687
- rii = R[i, i]
688
- si = sgn_numba(rii)
689
- if si != 1.0:
690
- Q[:, i] *= np.conj(si)
691
- R[i, i:] *= si
692
- return Q, None, R
693
-
694
-
695
- @qr_stabilized.register("autoray.lazy")
696
- @lazy.core.lazy_cache("qr_stabilized")
697
- def qr_stabilized_lazy(x):
698
- m, n = x.shape
699
- k = min(m, n)
700
- lqrs = x.to(
701
- fn=get_lib_fn(x.backend, "qr_stabilized"),
702
- args=(x,),
703
- shape=(3,),
704
- )
705
- Q = lqrs.to(operator.getitem, (lqrs, 0), shape=(m, k))
706
- R = lqrs.to(operator.getitem, (lqrs, 2), shape=(k, n))
707
- return Q, None, R
708
-
709
-
710
@compose
def lq_stabilized(x, backend=None):
    """LQ-decomposition with stabilized L, via QR of the transpose."""
    with backend_like(backend):
        Q, _, L = qr_stabilized(do("transpose", x))
        return do("transpose", L), None, do("transpose", Q)
715
-
716
-
717
- @lq_stabilized.register("numpy")
718
- @njit # pragma: no cover
719
- def lq_stabilized_numba(x):
720
- Q, _, L = qr_stabilized_numba(x.T)
721
- return L.T, None, Q.T
722
-
723
-
724
@njit  # pragma: no cover
def _cholesky_numba(x, cutoff=-1, cutoff_mode=4, max_bond=-1, absorb=0):
    """SVD-like 'split' using cholesky decomposition, only works if ``x`` is
    positive definite.
    """
    L = np.linalg.cholesky(x)
    return L, None, dag_numba(L)
731
-
732
-
733
def cholesky(x, cutoff=-1, cutoff_mode=4, max_bond=-1, absorb=0):
    """Cholesky 'split' of ``x``, retrying with a small regularizing identity
    shift if the plain factorization fails and ``cutoff >= 0``.
    """
    try:
        return _cholesky_numba(x, cutoff, cutoff_mode, max_bond, absorb)
    except np.linalg.LinAlgError as e:
        if cutoff < 0:
            raise e
        # try adding cutoff identity - assuming it is approx allowable error
        xi = x + 2 * cutoff * np.eye(x.shape[0])
        return _cholesky_numba(xi, cutoff, cutoff_mode, max_bond, absorb)
742
-
743
-
744
@compose
def polar_right(x):
    """Polar decomposition of ``x``, as ``x = U @ P``."""
    W, s, VH = do("linalg.svd", x)
    # unitary part
    U = W @ VH
    # positive-semidefinite part
    P = dag(VH) @ ldmul(s, VH)
    return U, None, P
751
-
752
-
753
- @polar_right.register("numpy")
754
- @njit # pragma: no cover
755
- def polar_right_numba(x):
756
- W, s, VH = np.linalg.svd(x, full_matrices=0)
757
- U = W @ VH
758
- P = dag_numba(VH) @ ldmul_numba(s, VH)
759
- return U, None, P
760
-
761
-
762
@compose
def polar_left(x):
    """Polar decomposition of ``x``, as ``x = P @ U``."""
    W, s, VH = do("linalg.svd", x)
    # unitary part
    U = W @ VH
    # positive-semidefinite part
    P = rdmul(W, s) @ dag(W)
    return P, None, U
769
-
770
-
771
- @polar_left.register("numpy")
772
- @njit # pragma: no cover
773
- def polar_left_numba(x):
774
- W, s, VH = np.linalg.svd(x, full_matrices=0)
775
- U = W @ VH
776
- P = rdmul_numba(W, s) @ dag_numba(W)
777
- return P, None, U
778
-
779
-
780
- # ------ similarity transforms for compressing effective environments ------- #
781
-
782
-
783
def _similarity_compress_eig(X, max_bond, renorm):
    """Build a similarity-transform compression of ``X`` from its
    ``max_bond`` largest magnitude eigenpairs.
    """
    # eigen decompose X -> V w V^-1
    el, ev = do("linalg.eig", X)
    evi = do("linalg.inv", ev)

    # choose largest abs value eigenpairs
    sel = do("argsort", do("abs", el))[-max_bond:]
    Cl = ev[:, sel]
    Cr = evi[sel, :]

    if renorm:
        # rescale so the trace (sum of eigenvalues) is preserved
        trace_old = do("sum", el)
        trace_new = do("sum", el[sel])
        Cl = Cl * trace_old / trace_new

    return Cl, Cr
799
-
800
-
801
@njit(
    [
        "(c8[:,:], i4, i4)",
        "(c16[:,:], i4, i4)",
    ]
)  # pragma: no cover
def _similarity_compress_eig_numba(X, max_bond, renorm):
    """Numba version of ``_similarity_compress_eig``."""
    # eigen decompose X -> V w V^-1
    el, ev = np.linalg.eig(X)
    evi = np.linalg.inv(ev)
    # choose largest abs value eigenpairs
    sel = np.argsort(np.abs(el))[-max_bond:]
    Cl = ev[:, sel]
    Cr = evi[sel, :]
    if renorm:
        # rescale so the trace (sum of eigenvalues) is preserved
        trace_old = np.sum(el)
        trace_new = np.sum(el[sel])
        Cl = Cl * trace_old / trace_new
    return Cl, Cr
818
-
819
-
820
def _similarity_compress_eigh(X, max_bond, renorm):
    """Compression via the hermitian part of ``X``: project onto its
    ``max_bond`` largest magnitude eigenvectors.
    """
    # hermitian part of X
    XX = (X + dag(X)) / 2
    el, ev = do("linalg.eigh", XX)
    sel = do("argsort", do("abs", el))[-max_bond:]
    Cl = ev[:, sel]
    Cr = dag(Cl)
    if renorm:
        # rescale so the trace of the compressed X is preserved
        trace_old = do("trace", X)
        trace_new = do("trace", Cr @ (X @ Cl))
        Cl = Cl * trace_old / trace_new
    return Cl, Cr
831
-
832
-
833
@njit  # pragma: no cover
def _similarity_compress_eigh_numba(X, max_bond, renorm):
    """Numba version of ``_similarity_compress_eigh``."""
    # hermitian part of X
    XX = (X + dag_numba(X)) / 2
    el, ev = np.linalg.eigh(XX)
    # largest magnitude eigenvectors first
    sel = np.argsort(-np.abs(el))[:max_bond]
    Cl = ev[:, sel]
    Cr = dag_numba(Cl)
    if renorm:
        # rescale so the trace of the compressed X is preserved
        trace_old = np.trace(X)
        trace_new = np.trace(Cr @ (X @ Cl))
        Cl = Cl * trace_old / trace_new
    return Cl, Cr
845
-
846
-
847
- def _similarity_compress_svd(X, max_bond, renorm, asymm):
848
- U, _, VH = do("linalg.svd", X)
849
- U = U[:, :max_bond]
850
-
851
- Cl = U
852
- if asymm:
853
- VH = VH[:max_bond, :]
854
- Cr = dag(U)
855
- Cl = dag(VH)
856
- else:
857
- Cr = dag(U)
858
-
859
- if renorm:
860
- # explicitly maintain trace value
861
- trace_old = do("trace", X)
862
- trace_new = do("trace", Cr @ (X @ Cl))
863
- Cl = Cl * (trace_old / trace_new)
864
-
865
- return Cl, Cr
866
-
867
-
868
- @njit # pragma: no cover
869
- def _similarity_compress_svd_numba(X, max_bond, renorm, asymm):
870
- U, _, VH = np.linalg.svd(X)
871
- U = U[:, :max_bond]
872
- Cl = U
873
-
874
- if asymm:
875
- VH = VH[:max_bond, :]
876
- Cr = dag_numba(U)
877
- Cl = dag_numba(VH)
878
- else:
879
- Cr = dag_numba(U)
880
-
881
- if renorm:
882
- trace_old = np.trace(X)
883
- trace_new = np.trace(Cr @ (X @ Cl))
884
- Cl = Cl * trace_old / trace_new
885
- return Cl, Cr
886
-
887
-
888
- def _similarity_compress_biorthog(X, max_bond, renorm):
889
- U, s, VH = do("linalg.svd", X)
890
-
891
- B = U[:, :max_bond]
892
- AH = VH[:max_bond, :]
893
-
894
- Uab, sab, VHab = do("linalg.svd", AH @ B)
895
- sab = (sab + 1e-12 * do("max", sab)) ** -0.5
896
- sab_inv = do("reshape", sab, (1, -1))
897
- P = Uab * sab_inv
898
- Q = dag(VHab) * sab_inv
899
-
900
- Cl = B @ Q
901
- Cr = dag(P) @ AH
902
-
903
- if renorm:
904
- trace_old = do("trace", X)
905
- trace_new = do("trace", Cr @ (X @ Cl))
906
- Cl = Cl * trace_old / trace_new
907
-
908
- return Cl, Cr
909
-
910
-
911
- @njit # pragma: no cover
912
- def _similarity_compress_biorthog_numba(X, max_bond, renorm):
913
- U, s, VH = np.linalg.svd(X)
914
-
915
- B = U[:, :max_bond]
916
- AH = VH[:max_bond, :]
917
-
918
- Uab, sab, VHab = np.linalg.svd(AH @ B)
919
-
920
- # smudge factor
921
- sab += 1e-12 * np.max(sab)
922
- sab **= -0.5
923
-
924
- sab_inv = sab.reshape((1, -1))
925
- P = Uab * sab_inv
926
- Q = dag_numba(VHab) * sab_inv
927
-
928
- Cl = B @ Q
929
- Cr = dag_numba(P) @ AH
930
-
931
- if renorm:
932
- trace_old = np.trace(X)
933
- trace_new = np.trace(Cr @ (X @ Cl))
934
- Cl = Cl * trace_old / trace_new
935
-
936
- return Cl, Cr
937
-
938
-
939
- _similarity_compress_fns = {
940
- ("eig", False): _similarity_compress_eig,
941
- ("eig", True): _similarity_compress_eig_numba,
942
- ("eigh", False): _similarity_compress_eigh,
943
- ("eigh", True): _similarity_compress_eigh_numba,
944
- ("svd", False): functools.partial(_similarity_compress_svd, asymm=0),
945
- ("svd", True): functools.partial(_similarity_compress_svd_numba, asymm=0),
946
- ("biorthog", False): _similarity_compress_biorthog,
947
- ("biorthog", True): _similarity_compress_biorthog_numba,
948
- }
949
-
950
-
951
- def similarity_compress(X, max_bond, renorm=False, method="eigh"):
952
- if method == "eig":
953
- if get_dtype_name(X) == "float64":
954
- X = astype(X, "complex128")
955
- elif get_dtype_name(X) == "float32":
956
- X = astype(X, "complex64")
957
-
958
- isnumpy = isinstance(X, np.ndarray)
959
- # if isnumpy:
960
- # X = np.ascontiguousarray(X)
961
- fn = _similarity_compress_fns[method, isnumpy]
962
- return fn(X, max_bond, int(renorm))
963
-
964
-
965
- @compose
966
- def isometrize_qr(x, backend=None):
967
- """Perform isometrization using the QR decomposition."""
968
- with backend_like(backend):
969
- Q, R = do("linalg.qr", x)
970
- # stabilize qr by fixing diagonal of R in canonical, positive form (we
971
- # don't actaully do anything to R, just absorb the necessary sign -> Q)
972
- rd = do("diag", R)
973
- s = do("sign", rd) + (rd == 0)
974
- Q = Q * reshape(s, (1, -1))
975
- return Q
976
-
977
-
978
- @compose
979
- def isometrize_svd(x, backend=None):
980
- """Perform isometrization using the SVD decomposition."""
981
- U, _, VH = do("linalg.svd", x, like=backend)
982
- return U @ VH
983
-
984
-
985
- @compose
986
- def isometrize_exp(x, backend):
987
- r"""Perform isometrization using anti-symmetric matrix exponentiation.
988
-
989
- .. math::
990
-
991
- U_A = \exp \left( X - X^\dagger \right)
992
-
993
- If ``x`` is rectangular it is completed with zeros first.
994
- """
995
- with backend_like(backend):
996
- m, n = x.shape
997
- d = max(m, n)
998
- x = do(
999
- "pad", x, [[0, d - m], [0, d - n]], "constant", constant_values=0.0
1000
- )
1001
- x = x - dag(x)
1002
- Q = do("linalg.expm", x)
1003
- return Q[:m, :n]
1004
-
1005
-
1006
- @compose
1007
- def isometrize_cayley(x, backend):
1008
- r"""Perform isometrization using an anti-symmetric Cayley transform.
1009
-
1010
- .. math::
1011
-
1012
- U_A = (I + \dfrac{A}{2})(I - \dfrac{A}{2})^{-1}
1013
-
1014
- where :math:`A = X - X^\dagger`. If ``x`` is rectangular it is completed
1015
- with zeros first.
1016
- """
1017
- with backend_like(backend):
1018
- m, n = x.shape
1019
- d = max(m, n)
1020
- x = do(
1021
- "pad", x, [[0, d - m], [0, d - n]], "constant", constant_values=0.0
1022
- )
1023
- x = x - dag(x)
1024
- x = x / 2.0
1025
- Id = do("eye", d, like=x)
1026
- Q = do("linalg.solve", Id - x, Id + x)
1027
- return Q[:m, :n]
1028
-
1029
-
1030
- @compose
1031
- def isometrize_modified_gram_schmidt(A, backend=None):
1032
- """Perform isometrization explicitly using the modified Gram Schmidt
1033
- procedure (this is slow but a useful reference).
1034
- """
1035
- with backend_like(backend):
1036
- Q = []
1037
- for j in range(A.shape[1]):
1038
- q = A[:, j]
1039
- for i in range(0, j):
1040
- rij = do("tensordot", do("conj", Q[i]), q, 1)
1041
- q = q - rij * Q[i]
1042
- Q.append(q / do("linalg.norm", q))
1043
- Q = do("stack", tuple(Q), axis=1)
1044
- return Q
1045
-
1046
-
1047
- @compose
1048
- def isometrize_householder(X, backend=None):
1049
- with backend_like(backend):
1050
- X = do("tril", X, -1)
1051
- tau = 2.0 / (1.0 + do("sum", do("conj", X) * X, 0))
1052
- Q = do("linalg.householder_product", X, tau)
1053
- return Q
1054
-
1055
-
1056
- def isometrize_torch_householder(x):
1057
- """Isometrize ``x`` using the Householder reflection method, as implemented
1058
- by the ``torch_householder`` package.
1059
- """
1060
- from torch_householder import torch_householder_orgqr
1061
-
1062
- return torch_householder_orgqr(x)
1063
-
1064
-
1065
- _ISOMETRIZE_METHODS = {
1066
- "qr": isometrize_qr,
1067
- "svd": isometrize_svd,
1068
- "mgs": isometrize_modified_gram_schmidt,
1069
- "exp": isometrize_exp,
1070
- "cayley": isometrize_cayley,
1071
- "householder": isometrize_householder,
1072
- "torch_householder": isometrize_torch_householder,
1073
- }
1074
-
1075
-
1076
- def isometrize(x, method="qr"):
1077
- """Generate an isometric (or unitary if square) / orthogonal matrix from
1078
- array ``x``.
1079
-
1080
- Parameters
1081
- ----------
1082
- x : array
1083
- The matrix to project into isometrix form.
1084
- method : str, optional
1085
- The method used to generate the isometry. The options are:
1086
-
1087
- - "qr": use the Q factor of the QR decomposition of ``x`` with the
1088
- constraint that the diagonal of ``R`` is positive.
1089
- - "svd": uses ``U @ VH`` of the SVD decomposition of ``x``. This is
1090
- useful for finding the 'closest' isometric matrix to ``x``, such as
1091
- when it has been expanded with noise etc. But is less stable for
1092
- differentiation / optimization.
1093
- - "exp": use the matrix exponential of ``x - dag(x)``, first
1094
- completing ``x`` with zeros if it is rectangular. This is a good
1095
- parametrization for optimization, but more expensive for non-square
1096
- ``x``.
1097
- - "cayley": use the Cayley transform of ``x - dag(x)``, first
1098
- completing ``x`` with zeros if it is rectangular. This is a good
1099
- parametrization for optimization (one the few compatible with
1100
- `HIPS/autograd` e.g.), but more expensive for non-square ``x``.
1101
- - "householder": use the Householder reflection method directly. This
1102
- requires that the backend implements "linalg.householder_product".
1103
- - "torch_householder": use the Householder reflection method directly,
1104
- using the ``torch_householder`` package. This requires that the
1105
- package is installed and that the backend is ``"torch"``. This is
1106
- generally the best parametrizing method for "torch" if available.
1107
- - "mgs": use a python implementation of the modified Gram Schmidt
1108
- method directly. This is slow if not compiled but a useful reference.
1109
-
1110
- Not all backends support all methods or differentiating through all
1111
- methods.
1112
-
1113
- Returns
1114
- -------
1115
- Q : array
1116
- The isometrization / orthogonalization of ``x``.
1117
- """
1118
- m, n = x.shape
1119
- fat = m < n
1120
- if fat:
1121
- x = do("transpose", x)
1122
- Q = _ISOMETRIZE_METHODS[method](x)
1123
- if fat:
1124
- Q = do("transpose", Q)
1125
- return Q
1126
-
1127
-
1128
- @compose
1129
- def squared_op_to_reduced_factor(x2, dl, dr, right=True):
1130
- """Given the square, ``x2``, of an operator ``x``, compute either the left
1131
- or right reduced factor matrix of the unsquared operator ``x`` with
1132
- original shape ``(dl, dr)``.
1133
- """
1134
- s2, W = do("linalg.eigh", x2)
1135
-
1136
- if right:
1137
- if dl < dr:
1138
- # know exactly low-rank, so truncate
1139
- keep = dl
1140
- else:
1141
- keep = None
1142
- else:
1143
- if dl > dr:
1144
- # know exactly low-rank, so truncate
1145
- keep = dr
1146
- else:
1147
- keep = None
1148
-
1149
- if keep is not None:
1150
- # outer dimension smaller -> exactly low-rank
1151
- s2 = s2[-keep:]
1152
- W = W[:, -keep:]
1153
-
1154
- # might have negative eigenvalues due to numerical error from squaring
1155
- s2 = do("clip", s2, s2[-1] * 1e-12, None)
1156
- s = do("sqrt", s2)
1157
-
1158
- if right:
1159
- factor = ldmul(s, dag(W))
1160
- else: # 'left'
1161
- factor = rdmul(W, s)
1162
-
1163
- return factor
1164
-
1165
-
1166
- @squared_op_to_reduced_factor.register("numpy")
1167
- @njit
1168
- def squared_op_to_reduced_factor_numba(x2, dl, dr, right=True):
1169
- s2, W = np.linalg.eigh(x2)
1170
-
1171
- if right:
1172
- if dl < dr:
1173
- # know exactly low-rank, so truncate
1174
- keep = dl
1175
- else:
1176
- keep = None
1177
- else:
1178
- if dl > dr:
1179
- # know exactly low-rank, so truncate
1180
- keep = dr
1181
- else:
1182
- keep = None
1183
-
1184
- if keep is not None:
1185
- # outer dimension smaller -> exactly low-rank
1186
- s2 = s2[-keep:]
1187
- W = W[:, -keep:]
1188
-
1189
- # might have negative eigenvalues due to numerical error from squaring
1190
- s2 = np.clip(s2, 0.0, None)
1191
- s = np.sqrt(s2)
1192
-
1193
- if right:
1194
- factor = ldmul_numba(s, dag_numba(W))
1195
- else: # 'left'
1196
- factor = rdmul_numba(W, s)
1197
-
1198
- return factor
1199
-
1200
-
1201
- def compute_oblique_projectors(
1202
- Rl, Rr, max_bond, cutoff, absorb="both", cutoff_mode=4, **compress_opts
1203
- ):
1204
- """Compute the oblique projectors for two reduced factor matrices that
1205
- describe a gauge on a bond. Concretely, assuming that ``Rl`` and ``Rr`` are
1206
- the reduced factor matrices for local operator ``A``, such that:
1207
-
1208
- .. math::
1209
-
1210
- A = Q_L R_L R_R Q_R
1211
-
1212
- with ``Q_L`` and ``Q_R`` isometric matrices, then the optimal inner
1213
- truncation is given by:
1214
-
1215
- .. math::
1216
-
1217
- A' = Q_L P_L P_R' Q_R
1218
-
1219
- Parameters
1220
- ----------
1221
- Rl : array
1222
- The left reduced factor matrix.
1223
- Rr : array
1224
- The right reduced factor matrix.
1225
-
1226
- Returns
1227
- -------
1228
- Pl : array
1229
- The left oblique projector.
1230
- Pr : array
1231
- The right oblique projector.
1232
- """
1233
- if absorb != "both":
1234
- raise NotImplementedError("only absorb='both' supported")
1235
-
1236
- if max_bond is None:
1237
- max_bond = -1
1238
-
1239
- cutoff_mode = map_cutoff_mode(cutoff_mode)
1240
-
1241
- Ut, st, VHt = svd_truncated(
1242
- Rl @ Rr,
1243
- max_bond=max_bond,
1244
- cutoff=cutoff,
1245
- absorb=None,
1246
- cutoff_mode=cutoff_mode,
1247
- **compress_opts,
1248
- )
1249
- st_sqrt = do("sqrt", st)
1250
-
1251
- # then form the 'oblique' projectors
1252
- Pl = Rr @ rddiv(dag(VHt), st_sqrt)
1253
- Pr = lddiv(st_sqrt, dag(Ut)) @ Rl
1254
-
1255
- return Pl, Pr