Trajectree 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122) hide show
  1. trajectree/__init__.py +0 -3
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +3 -3
  5. trajectree/fock_optics/utils.py +6 -6
  6. trajectree/trajectory.py +2 -2
  7. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/METADATA +2 -3
  8. trajectree-0.0.2.dist-info/RECORD +16 -0
  9. trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
  10. trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
  11. trajectree/quimb/docs/conf.py +0 -158
  12. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
  13. trajectree/quimb/quimb/__init__.py +0 -507
  14. trajectree/quimb/quimb/calc.py +0 -1491
  15. trajectree/quimb/quimb/core.py +0 -2279
  16. trajectree/quimb/quimb/evo.py +0 -712
  17. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  18. trajectree/quimb/quimb/experimental/autojittn.py +0 -129
  19. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
  20. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
  21. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
  22. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
  23. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
  24. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
  25. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
  26. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
  27. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
  28. trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
  29. trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
  30. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
  31. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
  32. trajectree/quimb/quimb/experimental/schematic.py +0 -7
  33. trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
  34. trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
  35. trajectree/quimb/quimb/gates.py +0 -36
  36. trajectree/quimb/quimb/gen/__init__.py +0 -2
  37. trajectree/quimb/quimb/gen/operators.py +0 -1167
  38. trajectree/quimb/quimb/gen/rand.py +0 -713
  39. trajectree/quimb/quimb/gen/states.py +0 -479
  40. trajectree/quimb/quimb/linalg/__init__.py +0 -6
  41. trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
  42. trajectree/quimb/quimb/linalg/autoblock.py +0 -258
  43. trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
  44. trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
  45. trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
  46. trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
  47. trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
  48. trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
  49. trajectree/quimb/quimb/schematic.py +0 -1518
  50. trajectree/quimb/quimb/tensor/__init__.py +0 -401
  51. trajectree/quimb/quimb/tensor/array_ops.py +0 -610
  52. trajectree/quimb/quimb/tensor/circuit.py +0 -4824
  53. trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
  54. trajectree/quimb/quimb/tensor/contraction.py +0 -336
  55. trajectree/quimb/quimb/tensor/decomp.py +0 -1255
  56. trajectree/quimb/quimb/tensor/drawing.py +0 -1646
  57. trajectree/quimb/quimb/tensor/fitting.py +0 -385
  58. trajectree/quimb/quimb/tensor/geometry.py +0 -583
  59. trajectree/quimb/quimb/tensor/interface.py +0 -114
  60. trajectree/quimb/quimb/tensor/networking.py +0 -1058
  61. trajectree/quimb/quimb/tensor/optimize.py +0 -1818
  62. trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
  63. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
  64. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
  65. trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
  66. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
  67. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
  68. trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
  69. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
  70. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
  71. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
  72. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
  73. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
  74. trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
  75. trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
  76. trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
  77. trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
  78. trajectree/quimb/quimb/utils.py +0 -892
  79. trajectree/quimb/tests/__init__.py +0 -0
  80. trajectree/quimb/tests/test_accel.py +0 -501
  81. trajectree/quimb/tests/test_calc.py +0 -788
  82. trajectree/quimb/tests/test_core.py +0 -847
  83. trajectree/quimb/tests/test_evo.py +0 -565
  84. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  85. trajectree/quimb/tests/test_gen/test_operators.py +0 -361
  86. trajectree/quimb/tests/test_gen/test_rand.py +0 -296
  87. trajectree/quimb/tests/test_gen/test_states.py +0 -261
  88. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  89. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
  90. trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
  91. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
  92. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
  93. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
  94. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
  95. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  96. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  97. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
  100. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
  101. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
  102. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
  103. trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
  104. trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
  105. trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
  106. trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
  107. trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
  108. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
  109. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
  110. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
  111. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
  112. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
  113. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
  114. trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
  115. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
  116. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
  117. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
  118. trajectree/quimb/tests/test_utils.py +0 -85
  119. trajectree-0.0.1.dist-info/RECORD +0 -126
  120. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/WHEEL +0 -0
  121. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/licenses/LICENSE +0 -0
  122. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/top_level.txt +0 -0
@@ -1,385 +0,0 @@
1
- """Tools for computing distances between and fitting tensor networks."""
2
- from autoray import dag, do
3
-
4
- from .contraction import contract_strategy
5
- from ..utils import check_opt
6
-
7
-
8
- def tensor_network_distance(
9
- tnA,
10
- tnB,
11
- xAA=None,
12
- xAB=None,
13
- xBB=None,
14
- method="auto",
15
- normalized=False,
16
- **contract_opts,
17
- ):
18
- r"""Compute the Frobenius norm distance between two tensor networks:
19
-
20
- .. math::
21
-
22
- D(A, B)
23
- = | A - B |_{\mathrm{fro}}
24
- = \mathrm{Tr} [(A - B)^{\dagger}(A - B)]^{1/2}
25
- = ( \langle A | A \rangle - 2 \mathrm{Re} \langle A | B \rangle
26
- + \langle B | B \rangle ) ^{1/2}
27
-
28
- which should have matching outer indices. Note the default approach to
29
- computing the norm is precision limited to about ``eps**0.5`` where ``eps``
30
- is the precision of the data type, e.g. ``1e-8`` for float64. This is due
31
- to the subtraction in the above expression.
32
-
33
- Parameters
34
- ----------
35
- tnA : TensorNetwork or Tensor
36
- The first tensor network operator.
37
- tnB : TensorNetwork or Tensor
38
- The second tensor network operator.
39
- xAA : None or scalar
40
- The value of ``A.H @ A`` if you already know it (or it doesn't matter).
41
- xAB : None or scalar
42
- The value of ``A.H @ B`` if you already know it (or it doesn't matter).
43
- xBB : None or scalar
44
- The value of ``B.H @ B`` if you already know it (or it doesn't matter).
45
- method : {'auto', 'overlap', 'dense'}, optional
46
- How to compute the distance. If ``'overlap'``, the
47
- distance will be computed as the sum of overlaps, without explicitly
48
- forming the dense operators. If ``'dense'``, the operators will be
49
- directly formed and the norm computed, which can be quicker when the
50
- exterior dimensions are small. If ``'auto'``, the dense method will
51
- be used if the total operator (outer) size is ``<= 2**16``.
52
- normalized : bool, optional
53
- If ``True``, then normalize the distance by the norm of the two
54
- operators, i.e. ``2 * D(A, B) / (|A| + |B|)``. The resulting distance
55
- lies between 0 and 2 and is more useful for assessing convergence.
56
- contract_opts
57
- Supplied to :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.
58
-
59
- Returns
60
- -------
61
- D : float
62
- """
63
- check_opt("method", method, ("auto", "dense", "overlap"))
64
-
65
- tnA = tnA.as_network()
66
- tnB = tnB.as_network()
67
-
68
- oix = tnA.outer_inds()
69
- if set(oix) != set(tnB.outer_inds()):
70
- raise ValueError(
71
- "Can only compute distance between tensor "
72
- "networks with matching outer indices."
73
- )
74
-
75
- if method == "auto":
76
- d = tnA.inds_size(oix)
77
- if d <= 1 << 16:
78
- method = "dense"
79
- else:
80
- method = "overlap"
81
-
82
- # directly from vectorizations of both
83
- if method == "dense":
84
- tnA = tnA.contract(..., output_inds=oix, preserve_tensor=True)
85
- tnB = tnB.contract(..., output_inds=oix, preserve_tensor=True)
86
-
87
- # overlap method
88
- if xAA is None:
89
- xAA = (tnA | tnA.H).contract(..., **contract_opts)
90
- if xAB is None:
91
- xAB = (tnA | tnB.H).contract(..., **contract_opts)
92
- if xBB is None:
93
- xBB = (tnB | tnB.H).contract(..., **contract_opts)
94
-
95
- dAB = do("abs", xAA - 2 * do("real", xAB) + xBB) ** 0.5
96
-
97
- if normalized:
98
- dAB *= 2 / (do("abs", xAA)**0.5 + do("abs", xBB)**0.5)
99
-
100
- return dAB
101
-
102
-
103
-
104
-
105
- def tensor_network_fit_autodiff(
106
- tn,
107
- tn_target,
108
- steps=1000,
109
- tol=1e-9,
110
- autodiff_backend="autograd",
111
- contract_optimize="auto-hq",
112
- distance_method="auto",
113
- inplace=False,
114
- progbar=False,
115
- **kwargs,
116
- ):
117
- """Optimize the fit of ``tn`` with respect to ``tn_target`` using
118
- automatic differentiation. This minimizes the norm of the difference
119
- between the two tensor networks, which must have matching outer indices,
120
- using overlaps.
121
-
122
- Parameters
123
- ----------
124
- tn : TensorNetwork
125
- The tensor network to fit.
126
- tn_target : TensorNetwork
127
- The target tensor network to fit ``tn`` to.
128
- steps : int, optional
129
- The maximum number of autodiff steps.
130
- tol : float, optional
131
- The target norm distance.
132
- autodiff_backend : str, optional
133
- Which backend library to use to perform the gradient computation.
134
- contract_optimize : str, optional
135
- The contraction path optimizer used to contract the overlaps.
136
- distance_method : {'auto', 'dense', 'overlap'}, optional
137
- Supplied to :func:`~quimb.tensor.tensor_core.tensor_network_distance`,
138
- controls how the distance is computed.
139
- inplace : bool, optional
140
- Update ``tn`` in place.
141
- progbar : bool, optional
142
- Show a live progress bar of the fitting process.
143
- kwargs
144
- Passed to :class:`~quimb.tensor.tensor_core.optimize.TNOptimizer`.
145
-
146
- See Also
147
- --------
148
- tensor_network_distance, tensor_network_fit_als
149
- """
150
- from .optimize import TNOptimizer
151
- from .tensor_core import tensor_network_distance
152
-
153
- xBB = (tn_target | tn_target.H).contract(
154
- ...,
155
- output_inds=(),
156
- optimize=contract_optimize,
157
- )
158
-
159
- tnopt = TNOptimizer(
160
- tn=tn,
161
- loss_fn=tensor_network_distance,
162
- loss_constants={"tnB": tn_target, "xBB": xBB},
163
- loss_kwargs={"method": distance_method, "optimize": contract_optimize},
164
- autodiff_backend=autodiff_backend,
165
- progbar=progbar,
166
- **kwargs,
167
- )
168
-
169
- tn_fit = tnopt.optimize(steps, tol=tol)
170
-
171
- if not inplace:
172
- return tn_fit
173
-
174
- for t1, t2 in zip(tn, tn_fit):
175
- t1.modify(data=t2.data)
176
-
177
- return tn
178
-
179
-
180
- def _tn_fit_als_core(
181
- var_tags,
182
- tnAA,
183
- tnAB,
184
- xBB,
185
- tol,
186
- contract_optimize,
187
- steps,
188
- enforce_pos,
189
- pos_smudge,
190
- solver="solve",
191
- progbar=False,
192
- ):
193
- from .tensor_core import group_inds
194
-
195
- # shared intermediates + greedy = good reuse of contractions
196
- with contract_strategy(contract_optimize):
197
- # prepare each of the contractions we are going to repeat
198
- env_contractions = []
199
- for tg in var_tags:
200
- # varying tensor and conjugate in norm <A|A>
201
- tk = tnAA["__KET__", tg]
202
- tb = tnAA["__BRA__", tg]
203
-
204
- # get inds, and ensure any bonds come last, for linalg.solve
205
- lix, bix, rix = group_inds(tb, tk)
206
- tk.transpose_(*rix, *bix)
207
- tb.transpose_(*lix, *bix)
208
-
209
- # form TNs with 'holes', i.e. environment tensors networks
210
- A_tn = tnAA.select((tg,), "!all")
211
- y_tn = tnAB.select((tg,), "!all")
212
-
213
- env_contractions.append((tk, tb, lix, bix, rix, A_tn, y_tn))
214
-
215
- if tol != 0.0:
216
- old_d = float("inf")
217
-
218
- if progbar:
219
- import tqdm
220
-
221
- pbar = tqdm.trange(steps)
222
- else:
223
- pbar = range(steps)
224
-
225
- # the main iterative sweep on each tensor, locally optimizing
226
- for _ in pbar:
227
- for tk, tb, lix, bix, rix, A_tn, y_tn in env_contractions:
228
- Ni = A_tn.to_dense(lix, rix)
229
- Wi = y_tn.to_dense(rix, bix)
230
-
231
- if enforce_pos:
232
- el, ev = do("linalg.eigh", Ni)
233
- el = do("clip", el, el[-1] * pos_smudge, None)
234
- Ni_p = ev * do("reshape", el, (1, -1)) @ dag(ev)
235
- else:
236
- Ni_p = Ni
237
-
238
- if solver == "solve":
239
- x = do("linalg.solve", Ni_p, Wi)
240
- elif solver == "lstsq":
241
- x = do("linalg.lstsq", Ni_p, Wi, rcond=pos_smudge)[0]
242
-
243
- x_r = do("reshape", x, tk.shape)
244
- # n.b. because we are using virtual TNs -> updates propagate
245
- tk.modify(data=x_r)
246
- tb.modify(data=do("conj", x_r))
247
-
248
- # assess | A - B | for convergence or printing
249
- if (tol != 0.0) or progbar:
250
- xAA = do("trace", dag(x) @ (Ni @ x)) # <A|A>
251
- xAB = do("trace", do("real", dag(x) @ Wi)) # <A|B>
252
- d = do("abs", (xAA - 2 * xAB + xBB)) ** 0.5
253
- if abs(d - old_d) < tol:
254
- break
255
- old_d = d
256
-
257
- if progbar:
258
- pbar.set_description(str(d))
259
-
260
-
261
- def tensor_network_fit_als(
262
- tn,
263
- tn_target,
264
- tags=None,
265
- steps=100,
266
- tol=1e-9,
267
- solver="solve",
268
- enforce_pos=False,
269
- pos_smudge=None,
270
- tnAA=None,
271
- tnAB=None,
272
- xBB=None,
273
- contract_optimize="greedy",
274
- inplace=False,
275
- progbar=False,
276
- ):
277
- """Optimize the fit of ``tn`` with respect to ``tn_target`` using
278
- alternating least squares (ALS). This minimizes the norm of the difference
279
- between the two tensor networks, which must have matching outer indices,
280
- using overlaps.
281
-
282
- Parameters
283
- ----------
284
- tn : TensorNetwork
285
- The tensor network to fit.
286
- tn_target : TensorNetwork
287
- The target tensor network to fit ``tn`` to.
288
- tags : sequence of str, optional
289
- If supplied, only optimize tensors matching any of given tags.
290
- steps : int, optional
291
- The maximum number of ALS steps.
292
- tol : float, optional
293
- The target norm distance.
294
- solver : {'solve', 'lstsq', ...}, optional
295
- The underlying driver function used to solve the local minimization,
296
- e.g. ``numpy.linalg.solve`` for ``'solve'`` with ``numpy`` backend.
297
- enforce_pos : bool, optional
298
- Whether to enforce positivity of the locally formed environments,
299
- which can be more stable.
300
- pos_smudge : float, optional
301
- If enforcing positivity, the level below which to clip eigenvalues
302
- to make the local environment positive definite.
303
- tnAA : TensorNetwork, optional
304
- If you have already formed the overlap ``tn.H & tn``, maybe
305
- approximately, you can supply it here. The unconjugated layer should
306
- have tag ``'__KET__'`` and the conjugated layer ``'__BRA__'``. Each
307
- tensor being optimized should have tag ``'__VAR{i}__'``.
308
- tnAB : TensorNetwork, optional
309
- If you have already formed the overlap ``tn_target.H & tn``, maybe
310
- approximately, you can supply it here. Each tensor being optimized
311
- should have tag ``'__VAR{i}__'``.
312
- xBB : float, optional
313
- If you already know, or have computed, ``tn_target.H @ tn_target``,
314
- or it doesn't matter, you can supply the value here.
315
- contract_optimize : str, optional
316
- The contraction path optimizer used to contract the local environments.
317
- Note ``'greedy'`` is the default in order to maximize shared work.
318
- inplace : bool, optional
319
- Update ``tn`` in place.
320
- progbar : bool, optional
321
- Show a live progress bar of the fitting process.
322
-
323
- Returns
324
- -------
325
- TensorNetwork
326
-
327
- See Also
328
- --------
329
- tensor_network_fit_autodiff, tensor_network_distance
330
- """
331
- # mark the tensors we are going to optimize
332
- tna = tn.copy()
333
- tna.add_tag("__KET__")
334
-
335
- if tags is None:
336
- to_tag = tna
337
- else:
338
- to_tag = tna.select_tensors(tags, "any")
339
-
340
- var_tags = []
341
- for i, t in enumerate(to_tag):
342
- var_tag = f"__VAR{i}__"
343
- t.add_tag(var_tag)
344
- var_tags.append(var_tag)
345
-
346
- # form the norm of the varying TN (A) and its overlap with the target (B)
347
- if tnAA is None:
348
- tnAA = tna | tna.H.retag_({"__KET__": "__BRA__"})
349
- if tnAB is None:
350
- tnAB = tna | tn_target.H
351
-
352
- if (tol != 0.0) and (xBB is None):
353
- # <B|B>
354
- xBB = (tn_target | tn_target.H).contract(
355
- ...,
356
- optimize=contract_optimize,
357
- output_inds=(),
358
- )
359
-
360
- if pos_smudge is None:
361
- pos_smudge = max(tol, 1e-15)
362
-
363
- _tn_fit_als_core(
364
- var_tags=var_tags,
365
- tnAA=tnAA,
366
- tnAB=tnAB,
367
- xBB=xBB,
368
- tol=tol,
369
- contract_optimize=contract_optimize,
370
- steps=steps,
371
- enforce_pos=enforce_pos,
372
- pos_smudge=pos_smudge,
373
- solver=solver,
374
- progbar=progbar,
375
- )
376
-
377
- if not inplace:
378
- tn = tn.copy()
379
-
380
- for t1, t2 in zip(tn, tna):
381
- # transpose so only thing changed in original TN is data
382
- t2.transpose_like_(t1)
383
- t1.modify(data=t2.data)
384
-
385
- return tn