trajectree-0.0.1-py3-none-any.whl → trajectree-0.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. trajectree/__init__.py +0 -3
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +3 -3
  5. trajectree/fock_optics/utils.py +6 -6
  6. trajectree/trajectory.py +2 -2
  7. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/METADATA +2 -3
  8. trajectree-0.0.2.dist-info/RECORD +16 -0
  9. trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
  10. trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
  11. trajectree/quimb/docs/conf.py +0 -158
  12. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
  13. trajectree/quimb/quimb/__init__.py +0 -507
  14. trajectree/quimb/quimb/calc.py +0 -1491
  15. trajectree/quimb/quimb/core.py +0 -2279
  16. trajectree/quimb/quimb/evo.py +0 -712
  17. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  18. trajectree/quimb/quimb/experimental/autojittn.py +0 -129
  19. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
  20. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
  21. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
  22. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
  23. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
  24. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
  25. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
  26. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
  27. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
  28. trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
  29. trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
  30. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
  31. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
  32. trajectree/quimb/quimb/experimental/schematic.py +0 -7
  33. trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
  34. trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
  35. trajectree/quimb/quimb/gates.py +0 -36
  36. trajectree/quimb/quimb/gen/__init__.py +0 -2
  37. trajectree/quimb/quimb/gen/operators.py +0 -1167
  38. trajectree/quimb/quimb/gen/rand.py +0 -713
  39. trajectree/quimb/quimb/gen/states.py +0 -479
  40. trajectree/quimb/quimb/linalg/__init__.py +0 -6
  41. trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
  42. trajectree/quimb/quimb/linalg/autoblock.py +0 -258
  43. trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
  44. trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
  45. trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
  46. trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
  47. trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
  48. trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
  49. trajectree/quimb/quimb/schematic.py +0 -1518
  50. trajectree/quimb/quimb/tensor/__init__.py +0 -401
  51. trajectree/quimb/quimb/tensor/array_ops.py +0 -610
  52. trajectree/quimb/quimb/tensor/circuit.py +0 -4824
  53. trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
  54. trajectree/quimb/quimb/tensor/contraction.py +0 -336
  55. trajectree/quimb/quimb/tensor/decomp.py +0 -1255
  56. trajectree/quimb/quimb/tensor/drawing.py +0 -1646
  57. trajectree/quimb/quimb/tensor/fitting.py +0 -385
  58. trajectree/quimb/quimb/tensor/geometry.py +0 -583
  59. trajectree/quimb/quimb/tensor/interface.py +0 -114
  60. trajectree/quimb/quimb/tensor/networking.py +0 -1058
  61. trajectree/quimb/quimb/tensor/optimize.py +0 -1818
  62. trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
  63. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
  64. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
  65. trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
  66. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
  67. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
  68. trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
  69. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
  70. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
  71. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
  72. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
  73. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
  74. trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
  75. trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
  76. trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
  77. trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
  78. trajectree/quimb/quimb/utils.py +0 -892
  79. trajectree/quimb/tests/__init__.py +0 -0
  80. trajectree/quimb/tests/test_accel.py +0 -501
  81. trajectree/quimb/tests/test_calc.py +0 -788
  82. trajectree/quimb/tests/test_core.py +0 -847
  83. trajectree/quimb/tests/test_evo.py +0 -565
  84. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  85. trajectree/quimb/tests/test_gen/test_operators.py +0 -361
  86. trajectree/quimb/tests/test_gen/test_rand.py +0 -296
  87. trajectree/quimb/tests/test_gen/test_states.py +0 -261
  88. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  89. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
  90. trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
  91. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
  92. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
  93. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
  94. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
  95. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  96. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  97. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
  100. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
  101. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
  102. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
  103. trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
  104. trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
  105. trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
  106. trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
  107. trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
  108. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
  109. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
  110. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
  111. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
  112. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
  113. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
  114. trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
  115. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
  116. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
  117. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
  118. trajectree/quimb/tests/test_utils.py +0 -85
  119. trajectree-0.0.1.dist-info/RECORD +0 -126
  120. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/WHEEL +0 -0
  121. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/licenses/LICENSE +0 -0
  122. {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/top_level.txt +0 -0
@@ -1,1854 +0,0 @@
1
- """Generic methods for compressing 1D-like tensor networks, where the tensor
2
- network can locally have arbitrary structure and outer indices.
3
-
4
- - [x] the direct method
5
- - [x] the density matrix method
6
- - [x] the zip-up method
7
- - [x] the zip-up first method
8
- - [x] the 1-site variational fit method, including sums of tensor networks
9
- - [x] the 2-site variational fit method, including sums of tensor networks
10
- - [x] the local projector method (CTMRG and HOTRG style)
11
- - [x] the autofit method (via non-1d specific ALS or autodiff)
12
-
13
- """
14
-
15
- import collections
16
- import functools
17
- import itertools
18
- import warnings
19
-
20
- from autoray import do
21
-
22
- from .tensor_arbgeom import tensor_network_apply_op_vec
23
- from .tensor_arbgeom_compress import tensor_network_ag_compress
24
- from .tensor_builder import TN_matching
25
- from .tensor_core import (
26
- Tensor,
27
- TensorNetwork,
28
- ensure_dict,
29
- rand_uuid,
30
- tensor_contract,
31
- )
32
-
33
-
34
- def enforce_1d_like(tn, site_tags=None, fix_bonds=True, inplace=False):
35
- """Check that ``tn`` is 1D-like with OBC, i.e. 1) that each tensor has
36
- exactly one of the given ``site_tags``. If not, raise a ValueError. 2) That
37
- there are no hyper indices. And 3) that there are only bonds within sites
38
- or between nearest neighbor sites. A violation of 3) can optionally be
39
- fixed automatically by inserting a string of identity tensors.
40
-
41
- Parameters
42
- ----------
43
- tn : TensorNetwork
44
- The tensor network to check.
45
- site_tags : sequence of str, optional
46
- The tags to use to group and order the tensors from ``tn``. If not
47
- given, uses ``tn.site_tags``.
48
- fix_bonds : bool, optional
49
- Whether to fix the bond structure by inserting identity tensors.
50
- inplace : bool, optional
51
- Whether to perform the fix inplace or not.
52
-
53
- Raises
54
- ------
55
- ValueError
56
- If the tensor network is not 1D-like.
57
- """
58
- tn = tn if inplace else tn.copy()
59
-
60
- if site_tags is None:
61
- site_tags = tn.site_tags
62
-
63
- tag_to_site = {tag: i for i, tag in enumerate(site_tags)}
64
- tid_to_site = {}
65
-
66
- def _check_tensor_site(tid, t):
67
- if tid in tid_to_site:
68
- return tid_to_site[tid]
69
-
70
- sites = []
71
- for tag in t.tags:
72
- site = tag_to_site.get(tag, None)
73
- if site is not None:
74
- sites.append(site)
75
- if len(sites) != 1:
76
- raise ValueError(
77
- f"{t} does not have one site tag, it has {sites}."
78
- )
79
-
80
- return sites[0]
81
-
82
- for ix, tids in list(tn.ind_map.items()):
83
- if len(tids) == 1:
84
- # assume outer
85
- continue
86
- elif len(tids) != 2:
87
- raise ValueError(
88
- f"TN has a hyper index, {ix}, connecting more than 2 tensors."
89
- )
90
-
91
- tida, tidb = tids
92
- ta = tn.tensor_map[tida]
93
- tb = tn.tensor_map[tidb]
94
-
95
- # get which single site each tensor belongs to
96
- sa = _check_tensor_site(tida, ta)
97
- sb = _check_tensor_site(tidb, tb)
98
- if sa > sb:
99
- sa, sb = sb, sa
100
-
101
- if sb - sa > 1:
102
- if not fix_bonds:
103
- raise ValueError(
104
- f"Tensor {ta} and {tb} are not nearest "
105
- "neighbors, and `fix_bonds=False`."
106
- )
107
-
108
- # not 1d like: bond is not nearest neighbor
109
- # but can insert identities along string to fix
110
- data = do("eye", ta.ind_size(ix), like=ta.data)
111
-
112
- ixl = ix
113
- for i in range(sa + 1, sb):
114
- ixr = rand_uuid()
115
- tn |= Tensor(
116
- data=data,
117
- inds=[ixl, ixr],
118
- tags=site_tags[i],
119
- )
120
- ixl = ixr
121
-
122
- tb.reindex_({ix: ixl})
123
-
124
- return tn
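A small sketch of the bond-fixing behaviour described in the docstring, assuming upstream `quimb`; the tensors, index names and site tags below are invented for illustration. The shared index 'x' jumps from site 0 to site 2, so an identity tensor tagged 'I1' is spliced in to restore the nearest-neighbour structure:

    import numpy as np
    import quimb.tensor as qtn
    from quimb.tensor.tensor_1d_compress import enforce_1d_like

    a = qtn.Tensor(np.random.rand(2, 3), inds=("k0", "x"), tags="I0")
    b = qtn.Tensor(np.random.rand(2), inds=("k1",), tags="I1")
    c = qtn.Tensor(np.random.rand(2, 3), inds=("k2", "x"), tags="I2")
    tn = qtn.TensorNetwork([a, b, c])

    # 'x' connects sites 0 and 2 -> an identity tensor tagged 'I1' is inserted
    fixed = enforce_1d_like(tn, site_tags=("I0", "I1", "I2"))
    assert fixed.num_tensors == 4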
125
-
126
-
127
- def possibly_permute_(tn, permute_arrays):
128
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
129
- if permute_arrays and hasattr(tn, "permute_arrays"):
130
- if permute_arrays is True:
131
- # use default order
132
- tn.permute_arrays()
133
- else:
134
- # use given order
135
- tn.permute_arrays(permute_arrays)
136
-
137
-
138
- def tensor_network_1d_compress_direct(
139
- tn,
140
- max_bond=None,
141
- cutoff=1e-10,
142
- site_tags=None,
143
- normalize=False,
144
- canonize=True,
145
- cutoff_mode="rsum2",
146
- permute_arrays=True,
147
- optimize="auto-hq",
148
- sweep_reverse=False,
149
- equalize_norms=False,
150
- inplace=False,
151
- **compress_opts,
152
- ):
153
- """Compress a 1D-like tensor network using the 'direct' or 'naive' method,
154
- that is, explicitly contracting site-wise to form an MPS-like TN,
155
- canonicalizing in one direction, then compressing in the other. This has
156
- the same scaling as the density matrix (dm) method, but a larger prefactor.
157
- It can still be faster for small bond dimensions however, and is
158
- potentially higher precision since it works in the space of singular values
159
- directly rather than singular values squared. It is not quite optimal in
160
- terms of error due to the compounding errors of the SVDs.
161
-
162
- Parameters
163
- ----------
164
- tn : TensorNetwork
165
- The tensor network to compress. Every tensor should have exactly one of
166
- the site tags. Each site can have multiple tensors and output indices.
167
- max_bond : int
168
- The maximum bond dimension to compress to.
169
- cutoff : float, optional
170
- A dynamic threshold for discarding singular values when compressing.
171
- site_tags : sequence of str, optional
172
- The tags to use to group and order the tensors from ``tn``. If not
173
- given, uses ``tn.site_tags``. The tensor network built will have one
174
- tensor per site, in the order given by ``site_tags``.
175
- normalize : bool, optional
176
- Whether to normalize the final tensor network, making use of the fact
177
- that the output tensor network is in right canonical form.
178
- canonize : bool, optional
179
- Whether to canonicalize the network in one direction before compressing
180
- in the other.
181
- cutoff_mode : {"rsum2", "rel", ...}, optional
182
- The mode to use when truncating the singular values of the decomposed
183
- tensors. See :func:`~quimb.tensor.tensor_split`.
184
- permute_arrays : bool or str, optional
185
- Whether to permute the array indices of the final tensor network into
186
- canonical order. If ``True`` will use the default order, otherwise if a
187
- string this specifies a custom order.
188
- optimize : str, optional
189
- The contraction path optimizer to use.
190
- sweep_reverse : bool, optional
191
- Whether to sweep in the reverse direction, resulting in a left
192
- canonical form instead of right canonical.
193
- equalize_norms : bool, optional
194
- Whether to renormalize the tensors during the compression procedure.
195
- If ``True`` the gathered exponent will be redistributed equally among
196
- the tensors. If a float, all tensors will be renormalized to this
197
- value, and the gathered exponent is tracked in ``tn.exponent`` of the
198
- returned tensor network.
199
- inplace : bool, optional
200
- Whether to perform the compression inplace or not.
201
- compress_opts
202
- Supplied to :func:`~quimb.tensor.tensor_split`.
203
-
204
- Returns
205
- -------
206
- TensorNetwork
207
- The compressed tensor network, with canonical center at
208
- ``site_tags[0]`` ('right canonical' form) or ``site_tags[-1]`` ('left
209
- canonical' form) if ``sweep_reverse``.
210
- """
211
- if site_tags is None:
212
- site_tags = tn.site_tags
213
- if sweep_reverse:
214
- site_tags = tuple(reversed(site_tags))
215
-
216
- new = enforce_1d_like(tn, site_tags=site_tags, inplace=inplace)
217
-
218
- # contract the first site group
219
- new.contract_tags_(site_tags[0], optimize=optimize)
220
-
221
- # sweep right
222
- for i in range(1, len(site_tags)):
223
- # contract the next site group
224
- new.contract_tags_(site_tags[i], optimize=optimize)
225
- # │ │ │ │ │ │ │ │ │ │
226
- # ▶━▶━▶━▶═○─○─○─○─○─○
227
- # ╲│ │ │ │ │
228
- # : : ○─○─○─○─○
229
- # i-1 i
230
-
231
- if canonize:
232
- # shift canonical center rightwards
233
- new.canonize_between(
234
- site_tags[i - 1], site_tags[i], equalize_norms=equalize_norms
235
- )
236
- # │ │ │ │ │ │ │ │ │ │
237
- # ▶━▶━▶━▶━▶─○─○─○─○─○
238
- # ╲│ │ │ │ │
239
- # : : ○─○─○─○─○
240
- # i-1 i
241
-
242
- # sweep left
243
- for i in range(len(site_tags) - 1, 0, -1):
244
- # compress and shift canonical center leftwards
245
- new.compress_between(
246
- site_tags[i - 1],
247
- site_tags[i],
248
- absorb="left",
249
- reduced="right",
250
- max_bond=max_bond,
251
- cutoff=cutoff,
252
- cutoff_mode=cutoff_mode,
253
- equalize_norms=equalize_norms,
254
- **compress_opts,
255
- )
256
- # │ │ │ │ │ │ │ │ │ │
257
- # ▶━▶━▶━▶━▶━▶━○─◀─◀─◀
258
- # :
259
- # : : max_bond
260
- # i-1 i
261
-
262
- if normalize:
263
- # make use of the fact that the output is in right canonical form
264
- t0 = new[site_tags[0]]
265
- t0.normalize_()
266
- new.exponent = 0.0
267
- elif equalize_norms is True:
268
- # redistribute the exponent equally among the tensors
269
- new.equalize_norms_()
270
-
271
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
272
- possibly_permute_(new, permute_arrays)
273
-
274
- return new
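A sketch of the 'multiple tensors per site' case the docstring mentions, here a lazily combined MPO-MPS product, again assuming upstream `quimb`; the sizes, and the fact that the default MPO/MPS index conventions line up so that `mpo | mps` shares the physical 'k{}' indices, are illustrative assumptions:

    import quimb.tensor as qtn
    from quimb.tensor.tensor_1d_compress import tensor_network_1d_compress_direct

    mps = qtn.MPS_rand_state(20, 16)   # bond dimension 16
    mpo = qtn.MPO_rand(20, 8)          # bond dimension 8
    tn = mpo | mps                     # lazy product: two tensors per site tag

    # one tensor per site, bond dimension capped at 32, canonical center at site 0
    out = tensor_network_1d_compress_direct(
        tn, max_bond=32, site_tags=mps.site_tags
    )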
275
-
276
-
277
- def tensor_network_1d_compress_dm(
278
- tn,
279
- max_bond=None,
280
- cutoff=1e-10,
281
- site_tags=None,
282
- normalize=False,
283
- cutoff_mode="rsum1",
284
- permute_arrays=True,
285
- optimize="auto-hq",
286
- sweep_reverse=False,
287
- canonize=True,
288
- equalize_norms=False,
289
- inplace=False,
290
- **compress_opts,
291
- ):
292
- """Compress any 1D-like tensor network using the 'density matrix' method
293
- (https://tensornetwork.org/mps/algorithms/denmat_mpo_mps/).
294
-
295
- While this has the same scaling as the direct method, in practice it can
296
- often be faster, especially at large bond dimensions. Potentially there are
297
- some situations where the direct method is more stable with regard to
298
- precision, since the density matrix method works in the 'squared' picture.
299
-
300
- Parameters
301
- ----------
302
- tn : TensorNetwork
303
- The tensor network to compress. Every tensor should have exactly one of
304
- the site tags. Each site can have multiple tensors and output indices.
305
- max_bond : int
306
- The maximum bond dimension to compress to.
307
- cutoff : float, optional
308
- The truncation error to use when compressing the double layer tensor
309
- network.
310
- site_tags : sequence of str, optional
311
- The tags to use to group and order the tensors from ``tn``. If not
312
- given, uses ``tn.site_tags``. The tensor network built will have one
313
- tensor per site, in the order given by ``site_tags``.
314
- normalize : bool, optional
315
- Whether to normalize the final tensor network, making use of the fact
316
- that the output tensor network is in right canonical form.
317
- cutoff_mode : {"rsum1", "rel", ...}, optional
318
- The mode to use when truncating the singular values of the decomposed
319
- tensors. See :func:`~quimb.tensor.tensor_split`. Note for the density
320
- matrix method the default 'rsum1' mode acts like 'rsum2' for the direct
321
- method due to truncating in the squared space.
322
- permute_arrays : bool or str, optional
323
- Whether to permute the array indices of the final tensor network into
324
- canonical order. If ``True`` will use the default order, otherwise if a
325
- string this specifies a custom order.
326
- optimize : str, optional
327
- The contraction path optimizer to use.
328
- sweep_reverse : bool, optional
329
- Whether to sweep in the reverse direction, resulting in a left
330
- canonical form instead of right canonical.
331
- canonize : bool, optional
332
- Dummy argument to match the signature of other compression methods.
333
- equalize_norms : bool or float, optional
334
- Whether to equalize the norms of the tensors after compression. If an
335
- explicit value is given, then the norms will be set to that value, and
336
- the overall scaling factor will be accumulated into `.exponent`.
337
- inplace : bool, optional
338
- Whether to perform the compression inplace or not.
339
- compress_opts
340
- Supplied to :func:`~quimb.tensor.tensor_split`.
341
-
342
- Returns
343
- -------
344
- TensorNetwork
345
- The compressed tensor network, with canonical center at
346
- ``site_tags[0]`` ('right canonical' form) or ``site_tags[-1]`` ('left
347
- canonical' form) if ``sweep_reverse``.
348
- """
349
- if not canonize:
350
- warnings.warn("`canonize=False` is ignored for the `dm` method.")
351
-
352
- if site_tags is None:
353
- site_tags = tn.site_tags
354
- if sweep_reverse:
355
- site_tags = tuple(reversed(site_tags))
356
- N = len(site_tags)
357
-
358
- ket = enforce_1d_like(tn, site_tags=site_tags, inplace=inplace)
359
-
360
- # partition outer indices, and create conjugate bra indices
361
- ket_site_inds = []
362
- bra_site_inds = []
363
- ketbra_indmap = {}
364
- for tag in site_tags:
365
- k_inds_i = []
366
- b_inds_i = []
367
- for kix in ket.select(tag)._outer_inds & ket._outer_inds:
368
- bix = rand_uuid()
369
- k_inds_i.append(kix)
370
- b_inds_i.append(bix)
371
- ketbra_indmap[kix] = bix
372
- ket_site_inds.append(tuple(k_inds_i))
373
- bra_site_inds.append(tuple(b_inds_i))
374
-
375
- bra = ket.H
376
- # doing this means forming the norm doesn't do its own mangling
377
- bra.mangle_inner_()
378
- # form the overlapping double layer TN
379
- norm = bra & ket
380
- # open the bra's indices back up
381
- bra.reindex_(ketbra_indmap)
382
-
383
- # construct dense left environments
384
- left_envs = {}
385
- left_envs[1] = norm.select(site_tags[0]).contract(
386
- optimize=optimize, drop_tags=True
387
- )
388
- for i in range(2, N):
389
- left_envs[i] = tensor_contract(
390
- left_envs[i - 1],
391
- *norm.select_tensors(site_tags[i - 1]),
392
- optimize=optimize,
393
- drop_tags=True,
394
- )
395
-
396
- # build projectors and right environments
397
- Us = []
398
- right_env_ket = None
399
- right_env_bra = None
400
- new_bonds = collections.defaultdict(rand_uuid)
401
-
402
- for i in range(N - 1, 0, -1):
403
- # form the reduced density matrix
404
- rho_tensors = [
405
- left_envs[i],
406
- *ket.select_tensors(site_tags[i]),
407
- *bra.select_tensors(site_tags[i]),
408
- ]
409
- left_inds = list(ket_site_inds[i])
410
- right_inds = list(bra_site_inds[i])
411
- if right_env_ket is not None:
412
- rho_tensors.extend([right_env_ket, right_env_bra])
413
- left_inds.append(new_bonds["k", i + 1])
414
- right_inds.append(new_bonds["b", i + 1])
415
-
416
- # contract and then split it
417
- rhoi = tensor_contract(
418
- *rho_tensors,
419
- preserve_tensor=True,
420
- optimize=optimize,
421
- )
422
- U, s, UH = rhoi.split(
423
- left_inds=left_inds,
424
- right_inds=right_inds,
425
- method="eigh",
426
- max_bond=max_bond,
427
- cutoff=cutoff,
428
- cutoff_mode=cutoff_mode,
429
- get="tensors",
430
- absorb=None,
431
- **compress_opts,
432
- )
433
-
434
- # turn bond into 'virtual right' indices
435
- (bix,) = s.inds
436
- U.reindex_({bix: new_bonds["k", i]})
437
- UH.reindex_({bix: new_bonds["b", i]})
438
- Us.append(U)
439
-
440
- # attach the unitaries to the right environments and contract
441
- right_ket_tensors = [*ket.select_tensors(site_tags[i]), U.H]
442
- right_bra_tensors = [*bra.select_tensors(site_tags[i]), UH.H]
443
- if right_env_ket is not None:
444
- # we have already done one move -> have right envs
445
- right_ket_tensors.append(right_env_ket)
446
- right_bra_tensors.append(right_env_bra)
447
-
448
- right_env_ket = tensor_contract(
449
- *right_ket_tensors, optimize=optimize, drop_tags=True
450
- )
451
- # TODO: could compute this just as conjugated and relabelled ket env
452
- right_env_bra = tensor_contract(
453
- *right_bra_tensors, optimize=optimize, drop_tags=True
454
- )
455
-
456
- # form the final site
457
- U0 = tensor_contract(
458
- *ket.select_tensors(site_tags[0]), right_env_ket, optimize=optimize
459
- )
460
-
461
- if normalize:
462
- # in right canonical form already
463
- U0.normalize_()
464
-
465
- # form the final TN
466
- if inplace:
467
- # simply replace all tensors
468
- new = tn
469
- new.remove_all_tensors()
470
- new |= U0
471
- for t in Us[::-1]:
472
- new |= t
473
- else:
474
- new = TensorNetwork([U0] + Us[::-1])
475
- # cast as whatever the input was e.g. MPS, MPO
476
- new.view_like_(tn)
477
-
478
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
479
- possibly_permute_(new, permute_arrays)
480
-
481
- # XXX: do better than simply waiting til the end to equalize norms
482
- if equalize_norms is True:
483
- new.equalize_norms_()
484
- elif equalize_norms:
485
- new.equalize_norms_(value=equalize_norms)
486
-
487
- return new
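A short note on the 'rsum1' default in the docstring above: the density matrix method truncates eigenvalues of a reduced density matrix, and those eigenvalues are the squares of the singular values that the direct method truncates, which is why 'rsum1' here plays the role of 'rsum2' there. A tiny numpy check of that identity (nothing quimb-specific):

    import numpy as np

    psi = np.random.rand(8, 8)                 # some bipartite state, shape (left, right)
    s = np.linalg.svd(psi, compute_uv=False)   # singular values (direct picture)
    rho = psi @ psi.T                          # reduced density matrix (squared picture)
    lam = np.linalg.eigvalsh(rho)[::-1]        # its eigenvalues, descending
    assert np.allclose(lam, s**2)              # lambda_i = s_i^2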
488
-
489
-
490
- def tensor_network_1d_compress_zipup(
491
- tn,
492
- max_bond=None,
493
- cutoff=1e-10,
494
- site_tags=None,
495
- canonize=True,
496
- normalize=False,
497
- cutoff_mode="rsum2",
498
- permute_arrays=True,
499
- optimize="auto-hq",
500
- sweep_reverse=False,
501
- equalize_norms=False,
502
- inplace=False,
503
- **compress_opts,
504
- ):
505
- """Compress a 1D-like tensor network using the 'zip-up' algorithm due to
506
- 'Minimally Entangled Typical Thermal State Algorithms', E.M. Stoudenmire &
507
- Steven R. White (https://arxiv.org/abs/1002.1305). The returned tensor
508
- network will have one tensor per site, in the order given by ``site_tags``,
509
- with canonical center at ``site_tags[0]`` ('right' canonical form).
510
-
511
- The zipup algorithm scales better than the direct and density matrix
512
- methods when multiple tensors are present at each site (such as MPO-MPS
513
- multiplication), but is less accurate due to the compressions taking place
514
- in a gauge that is only pseudo-canonical. It generally only makes sense in
515
- the fixed bond dimension case, as opposed to relying on a specific
516
- `cutoff` only.
517
-
518
- Parameters
519
- ----------
520
- tn : TensorNetwork
521
- The tensor network to compress. Every tensor should have exactly one of
522
- the site tags. Each site can have multiple tensors and output indices.
523
- max_bond : int
524
- The maximum bond dimension to compress to.
525
- cutoff : float, optional
526
- A dynamic threshold for discarding singular values when compressing.
527
- site_tags : sequence of str, optional
528
- The tags to use to group and order the tensors from ``tn``. If not
529
- given, uses ``tn.site_tags``. The tensor network built will have one
530
- tensor per site, in the order given by ``site_tags``.
531
- canonize : bool, optional
532
- Whether to pseudo canonicalize the initial tensor network.
533
- normalize : bool, optional
534
- Whether to normalize the final tensor network, making use of the fact
535
- that the output tensor network is in right canonical form.
536
- cutoff_mode : {"rsum2", "rel", ...}, optional
537
- The mode to use when truncating the singular values of the decomposed
538
- tensors. See :func:`~quimb.tensor.tensor_split`.
539
- permute_arrays : bool or str, optional
540
- Whether to permute the array indices of the final tensor network into
541
- canonical order. If ``True`` will use the default order, otherwise if a
542
- string this specifies a custom order.
543
- optimize : str, optional
544
- The contraction path optimizer to use.
545
- sweep_reverse : bool, optional
546
- Whether to sweep in the reverse direction, resulting in a left
547
- canonical form instead of right canonical.
548
- equalize_norms : bool or float, optional
549
- Whether to equalize the norms of the tensors after compression. If an
550
- explicit value is given, then the norms will be set to that value, and
551
- the overall scaling factor will be accumulated into `.exponent`.
552
- inplace : bool, optional
553
- Whether to perform the compression inplace or not.
554
- compress_opts
555
- Supplied to :func:`~quimb.tensor.tensor_split`.
556
-
557
- Returns
558
- -------
559
- TensorNetwork
560
- The compressed tensor network, with canonical center at
561
- ``site_tags[0]`` ('right canonical' form) or ``site_tags[-1]`` ('left
562
- canonical' form) if ``sweep_reverse``.
563
- """
564
- if site_tags is None:
565
- site_tags = tn.site_tags
566
- if sweep_reverse:
567
- site_tags = tuple(reversed(site_tags))
568
- N = len(site_tags)
569
-
570
- tn = enforce_1d_like(tn, site_tags=site_tags, inplace=inplace)
571
-
572
- # calculate the local site (outer) indices
573
- site_inds = [
574
- tuple(tn.select(tag)._outer_inds & tn._outer_inds) for tag in site_tags
575
- ]
576
-
577
- if canonize:
578
- # put in 'pseudo' left canonical form:
579
- # (NB: diagrams assume MPO-MPS but algorithm is agnostic)
580
- #
581
- # │ │ │ │ │ │ │ │ │ │
582
- # ▶─▶─▶─▶─▶─▶─▶─▶─▶─○ MPO
583
- # │ │ │ │ │ │ │ │ │ │
584
- # ▶─▶─▶─▶─▶─▶─▶─▶─▶─○ MPS
585
- #
586
- tn = tn.canonize_around_(site_tags[-1])
587
-
588
- # zip along the bonds
589
- ts = []
590
- bix = None
591
- Us = None
592
- for i in range(N - 1, 0, -1):
593
- # U*s VH
594
- # │ │ │ │
595
- # ─▶─▶──□━━◀━◀━
596
- # │ │ ╱ :
597
- # ─▶─▶ max_bond
598
- # i
599
- # .... contract
600
- if Us is None:
601
- # first site
602
- C = tensor_contract(
603
- *tn.select_tensors(site_tags[i]), optimize=optimize
604
- )
605
- else:
606
- C = tensor_contract(
607
- Us, *tn.select_tensors(site_tags[i]), optimize=optimize
608
- )
609
- # i
610
- # │ │ │ │
611
- # ─▶──□━━━━◀━◀━
612
- # │ ╱ :
613
- # ─▶ : bix
614
- # C
615
- right_inds = list(site_inds[i])
616
- if bix is not None:
617
- right_inds.append(bix)
618
-
619
- # the new bond index, keep track for above
620
- bix = rand_uuid()
621
-
622
- Us, VH = C.split(
623
- left_inds=None,
624
- right_inds=right_inds,
625
- max_bond=max_bond,
626
- cutoff=cutoff,
627
- cutoff_mode=cutoff_mode,
628
- absorb="left",
629
- bond_ind=bix,
630
- get="tensors",
631
- **compress_opts,
632
- )
633
- Us.drop_tags()
634
- ts.append(VH)
635
- # i
636
- # │ │ │ │
637
- # ─▶──□━◀━━◀━◀━
638
- # │ ╱
639
- # ─▶ : :
640
- # U*s VH
641
-
642
- U0 = tensor_contract(
643
- Us, *tn.select_tensors(site_tags[0]), optimize=optimize
644
- )
645
-
646
- if normalize:
647
- # in right canonical form already
648
- U0.normalize_()
649
-
650
- ts.append(U0)
651
-
652
- if inplace:
653
- new = tn
654
- new.remove_all_tensors()
655
- for t in ts:
656
- new |= t
657
- else:
658
- new = TensorNetwork(ts)
659
- # cast as whatever the input was e.g. MPS
660
- new.view_like_(tn)
661
-
662
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
663
- possibly_permute_(new, permute_arrays)
664
-
665
- # XXX: do better than simply waiting til the end to equalize norms
666
- if equalize_norms is True:
667
- new.equalize_norms_()
668
- elif equalize_norms:
669
- new.equalize_norms_(value=equalize_norms)
670
-
671
- return new
672
-
673
-
674
- def tensor_network_1d_compress_zipup_first(
675
- tn,
676
- max_bond=None,
677
- max_bond_zipup=None,
678
- cutoff=1e-10,
679
- cutoff_zipup=None,
680
- site_tags=None,
681
- canonize=True,
682
- normalize=False,
683
- cutoff_mode="rsum2",
684
- permute_arrays=True,
685
- optimize="auto-hq",
686
- sweep_reverse=False,
687
- equalize_norms=False,
688
- inplace=False,
689
- **compress_opts,
690
- ):
691
- """Compress this 1D-like tensor network using the 'zip-up first' algorithm,
692
- that is, first compressing the tensor network to a larger bond dimension
693
- using the 'zip-up' algorithm, then compressing to the desired bond
694
- dimension using a direct sweep.
695
-
696
- Depending on the values of ``max_bond`` and ``max_bond_zipup``, this can
697
- scale better than the direct and density matrix methods, but reach close to
698
- the same accuracy. As with the 'zip-up' method, there is no advantage
699
- unless there are multiple tensors per site, and it generally only makes
700
- sense in the fixed bond dimension case, as opposed to relying on a
701
- specific `cutoff` only.
702
-
703
- Parameters
704
- ----------
705
- tn : TensorNetwork
706
- The tensor network to compress. Every tensor should have exactly one of
707
- the site tags. Each site can have multiple tensors and output indices.
708
- max_bond : int
709
- The final maximum bond dimension to compress to.
710
- max_bond_zipup : int, optional
711
- The intermediate maximum bond dimension to compress to using the
712
- 'zip-up' algorithm. If not given and `max_bond` is, this is set as
713
- twice the target bond dimension, ``2 * max_bond``.
714
- cutoff : float, optional
715
- A dynamic threshold for discarding singular values when compressing.
716
- cutoff_zipup : float, optional
717
- A dynamic threshold for discarding singular values when compressing to
718
- the intermediate bond dimension using the 'zip-up' algorithm. If not
719
- given, this is set to the same as ``cutoff`` if a maximum bond is
720
- given, else ``cutoff / 10``.
721
- site_tags : sequence of str, optional
722
- The tags to use to group and order the tensors from ``tn``. If not
723
- given, uses ``tn.site_tags``. The tensor network built will have one
724
- tensor per site, in the order given by ``site_tags``.
725
- canonize : bool, optional
726
- Whether to pseudo canonicalize the initial tensor network.
727
- normalize : bool, optional
728
- Whether to normalize the final tensor network, making use of the fact
729
- that the output tensor network is in right canonical form.
730
- cutoff_mode : {"rsum2", "rel", ...}, optional
731
- The mode to use when truncating the singular values of the decomposed
732
- tensors. See :func:`~quimb.tensor.tensor_split`.
733
- permute_arrays : bool or str, optional
734
- Whether to permute the array indices of the final tensor network into
735
- canonical order. If ``True`` will use the default order, otherwise if a
736
- string this specifies a custom order.
737
- optimize : str, optional
738
- The contraction path optimizer to use.
739
- sweep_reverse : bool, optional
740
- Whether to sweep in the reverse direction, resulting in a left
741
- canonical form instead of right canonical.
742
- equalize_norms : bool or float, optional
743
- Whether to equalize the norms of the tensors after compression. If an
744
- explicit value is given, then the norms will be set to that value, and
745
- the overall scaling factor will be accumulated into `.exponent`.
746
- inplace : bool, optional
747
- Whether to perform the compression inplace or not.
748
- compress_opts
749
- Supplied to :func:`~quimb.tensor.tensor_split`.
750
-
751
- Returns
752
- -------
753
- TensorNetwork
754
- The compressed tensor network, with canonical center at
755
- ``site_tags[0]`` ('right canonical' form) or ``site_tags[-1]`` ('left
756
- canonical' form) if ``sweep_reverse``.
757
- """
758
- if max_bond_zipup is None:
759
- if max_bond is not None:
760
- max_bond_zipup = 2 * max_bond
761
-
762
- if cutoff_zipup is None:
763
- if max_bond is not None:
764
- # assume max_bond limited
765
- cutoff_zipup = cutoff
766
- else:
767
- # fully dynamic mode
768
- cutoff_zipup = cutoff / 10
769
-
770
- if site_tags is None:
771
- site_tags = tn.site_tags
772
- if sweep_reverse:
773
- site_tags = tuple(reversed(site_tags))
774
-
775
- # is now in right canonical form
776
- tn = tensor_network_1d_compress_zipup(
777
- tn,
778
- max_bond=max_bond_zipup,
779
- cutoff=cutoff_zipup,
780
- site_tags=site_tags,
781
- canonize=canonize,
782
- cutoff_mode=cutoff_mode,
783
- optimize=optimize,
784
- sweep_reverse=True,
785
- equalize_norms=equalize_norms,
786
- inplace=inplace,
787
- **compress_opts,
788
- )
789
-
790
- for i in range(len(site_tags) - 1, 0, -1):
791
- # compress and shift canonical center
792
- tn.compress_between(
793
- site_tags[i - 1],
794
- site_tags[i],
795
- absorb="left",
796
- reduced="right",
797
- max_bond=max_bond,
798
- cutoff=cutoff,
799
- cutoff_mode=cutoff_mode,
800
- equalize_norms=equalize_norms,
801
- **compress_opts,
802
- )
803
-
804
- if normalize:
805
- # make use of the fact that the output is in right canonical form
806
- tn[site_tags[-1]].normalize_()
807
-
808
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
809
- possibly_permute_(tn, permute_arrays)
810
-
811
- if equalize_norms is True:
812
- tn.equalize_norms_()
813
- elif equalize_norms:
814
- tn.equalize_norms_(value=equalize_norms)
815
-
816
- return tn
817
-
818
-
819
- def _tn1d_fit_sum_sweep_1site(
820
- tn_fit,
821
- tn_overlaps,
822
- site_tags,
823
- max_bond=None,
824
- cutoff=0.0,
825
- envs=None,
826
- prepare=True,
827
- reverse=False,
828
- compute_tdiff=True,
829
- optimize="auto-hq",
830
- ):
831
- """Core sweep of the 1-site 1D fit algorithm."""
832
-
833
- if cutoff != 0.0:
834
- raise ValueError("Non-zero `cutoff` not supported for 1-site fit.")
835
-
836
- N = len(site_tags)
837
- K = len(tn_overlaps)
838
-
839
- if max_bond is not None:
840
- current_bond_dim = tn_fit.max_bond()
841
- if current_bond_dim < max_bond:
842
- tn_fit.expand_bond_dimension_(max_bond)
843
- prepare = True
844
-
845
- if envs is None:
846
- envs = {}
847
- prepare = True
848
-
849
- if prepare:
850
- for k in range(K):
851
- envs.setdefault(("L", 0, k), TensorNetwork())
852
- envs.setdefault(("R", N - 1, k), TensorNetwork())
853
-
854
- if not reverse:
855
- # move canonical center to left
856
- tn_fit.canonize_around_(site_tags[0])
857
- # compute each of K right environments
858
- for i in reversed(range(N - 1)):
859
- site_r = site_tags[i + 1]
860
- for k, tn_overlap in enumerate(tn_overlaps):
861
- tni = envs["R", i + 1, k] | tn_overlap.select(site_r)
862
- envs["R", i, k] = tni.contract(all, optimize=optimize)
863
- else:
864
- # move canonical center to right
865
- tn_fit.canonize_around_(site_tags[-1])
866
- # compute each of K left environments
867
- for i in range(1, N):
868
- site_l = site_tags[i - 1]
869
- for k, tn_overlap in enumerate(tn_overlaps):
870
- tni = envs["L", i - 1, k] | tn_overlap.select(site_l)
871
- envs["L", i, k] = tni.contract(all, optimize=optimize)
872
-
873
- # track the maximum change in any tensor norm
874
- max_tdiff = -1.0
875
-
876
- sweep = range(N)
877
- if reverse:
878
- sweep = reversed(sweep)
879
-
880
- for i in sweep:
881
- site = site_tags[i]
882
-
883
- if not reverse:
884
- if i > 0:
885
- # move canonical center
886
- site_l = site_tags[i - 1]
887
- tn_fit.canonize_between(site_l, site)
888
-
889
- # recalculate K left environments
890
- for k, tn_overlap in enumerate(tn_overlaps):
891
- tni = envs["L", i - 1, k] | tn_overlap.select(site_l)
892
- envs["L", i, k] = tni.contract(all, optimize=optimize)
893
- else:
894
- if i < N - 1:
895
- # move canonical center
896
- site_r = site_tags[i + 1]
897
- tn_fit.canonize_between(site_r, site)
898
-
899
- # recalculate right environment
900
- for k, tn_overlap in enumerate(tn_overlaps):
901
- tni = envs["R", i + 1, k] | tn_overlap.select(site_r)
902
- envs["R", i, k] = tni.contract(all, optimize=optimize)
903
-
904
- tfi = tn_fit[site_tags[i]]
905
- tfinew = None
906
-
907
- for k, tn_overlap in enumerate(tn_overlaps):
908
- # form local overlap
909
- tnik = (
910
- envs["L", i, k]
911
- | tn_overlap.select_any(site_tags[i])
912
- | envs["R", i, k]
913
- )
914
-
915
- # remove old tensor
916
- del tnik["__FIT__", site]
917
-
918
- # contract its new value, maintaining index order
919
- tfiknew = tnik.contract(
920
- all, optimize=optimize, output_inds=tfi.inds
921
- )
922
-
923
- # sum into fitted tensor
924
- if tfinew is None:
925
- tfinew = tfiknew
926
- else:
927
- tfinew += tfiknew
928
-
929
- tfinew.conj_()
930
-
931
- if compute_tdiff:
932
- # track change in tensor norm
933
- dt = tfi.distance_normalized(tfinew)
934
- max_tdiff = max(max_tdiff, dt)
935
-
936
- # reinsert into all viewing tensor networks
937
- tfi.modify(data=tfinew.data)
938
-
939
- return max_tdiff
940
-
941
-
942
- def _tn1d_fit_sum_sweep_2site(
943
- tn_fit,
944
- tn_overlaps,
945
- site_tags,
946
- max_bond=None,
947
- cutoff=1e-10,
948
- envs=None,
949
- prepare=True,
950
- reverse=False,
951
- optimize="auto-hq",
952
- compute_tdiff=True,
953
- **compress_opts,
954
- ):
955
- """Core sweep of the 2-site 1D fit algorithm."""
956
-
957
- N = len(site_tags)
958
- K = len(tn_overlaps)
959
-
960
- if envs is None:
961
- envs = {}
962
- prepare = True
963
-
964
- if prepare:
965
- for k in range(K):
966
- envs.setdefault(("L", 0, k), TensorNetwork())
967
- envs.setdefault(("R", N - 1, k), TensorNetwork())
968
-
969
- if not reverse:
970
- # move canonical center to left
971
- tn_fit.canonize_around_(site_tags[0])
972
- # compute each of K right environments
973
- for i in range(N - 2, 0, -1):
974
- site_r = site_tags[i + 1]
975
- for k, tn_overlap in enumerate(tn_overlaps):
976
- tni = envs["R", i + 1, k] | tn_overlap.select(site_r)
977
- envs["R", i, k] = tni.contract(all, optimize=optimize)
978
- else:
979
- # move canonical center to right
980
- tn_fit.canonize_around_(site_tags[-1])
981
- # compute each of K left environments
982
- for i in range(1, N - 1):
983
- site_l = site_tags[i - 1]
984
- for k, tn_overlap in enumerate(tn_overlaps):
985
- tni = envs["L", i - 1, k] | tn_overlap.select(site_l)
986
- envs["L", i, k] = tni.contract(all, optimize=optimize)
987
-
988
- # track the maximum change in any tensor norm
989
- max_tdiff = -1.0
990
-
991
- sweep = range(N - 1)
992
- if reverse:
993
- sweep = reversed(sweep)
994
-
995
- for i in sweep:
996
- site0 = site_tags[i]
997
- site1 = site_tags[i + 1]
998
-
999
- if not reverse:
1000
- if i > 0:
1001
- site_l = site_tags[i - 1]
1002
- # recalculate K left environments
1003
- for k, tn_overlap in enumerate(tn_overlaps):
1004
- tni = envs["L", i - 1, k] | tn_overlap.select(site_l)
1005
- envs["L", i, k] = tni.contract(all, optimize=optimize)
1006
- else:
1007
- if i < N - 2:
1008
- site_r = site_tags[i + 2]
1009
- # recalculate right environment
1010
- for k, tn_overlap in enumerate(tn_overlaps):
1011
- tni = envs["R", i + 2, k] | tn_overlap.select(site_r)
1012
- envs["R", i + 1, k] = tni.contract(all, optimize=optimize)
1013
-
1014
- tfi0 = tn_fit[site0]
1015
- tfi1 = tn_fit[site1]
1016
- (bond,) = tfi0.bonds(tfi1)
1017
- left_inds = tuple(ix for ix in tfi0.inds if ix != bond)
1018
- right_inds = tuple(ix for ix in tfi1.inds if ix != bond)
1019
- tfinew = None
1020
-
1021
- for k, tn_overlap in enumerate(tn_overlaps):
1022
- # form local overlap
1023
- tnik = (
1024
- envs["L", i, k]
1025
- | tn_overlap.select_any((site0, site1))
1026
- | envs["R", i + 1, k]
1027
- )
1028
-
1029
- # remove old tensors
1030
- del tnik["__FIT__", site0]
1031
- del tnik["__FIT__", site1]
1032
-
1033
- # contract its new value, maintaining index order
1034
- tfiknew = tnik.contract(
1035
- all, optimize=optimize, output_inds=left_inds + right_inds
1036
- )
1037
-
1038
- # sum into fitted tensor
1039
- if tfinew is None:
1040
- tfinew = tfiknew
1041
- else:
1042
- tfinew += tfiknew
1043
-
1044
- tfinew.conj_()
1045
-
1046
- tfinew0, tfinew1 = tfinew.split(
1047
- max_bond=max_bond,
1048
- cutoff=cutoff,
1049
- absorb="left" if reverse else "right",
1050
- left_inds=left_inds,
1051
- right_inds=right_inds,
1052
- bond_ind=bond,
1053
- get="tensors",
1054
- **compress_opts,
1055
- )
1056
-
1057
- if compute_tdiff:
1058
- # track change in tensor norm
1059
- dt = (tfi0 | tfi1).distance_normalized(tfinew0 | tfinew1)
1060
- max_tdiff = max(max_tdiff, dt)
1061
-
1062
- # reinsert into all viewing tensor networks
1063
- tfinew0.transpose_like_(tfi0)
1064
- tfinew1.transpose_like_(tfi1)
1065
- tfi0.modify(data=tfinew0.data, left_inds=tfinew0.left_inds)
1066
- tfi1.modify(data=tfinew1.data, left_inds=tfinew1.left_inds)
1067
-
1068
- return max_tdiff
1069
-
1070
-
1071
- def tensor_network_1d_compress_fit(
1072
- tns,
1073
- max_bond=None,
1074
- cutoff=None,
1075
- tn_fit=None,
1076
- bsz="auto",
1077
- initial_bond_dim=8,
1078
- max_iterations=10,
1079
- tol=0.0,
1080
- site_tags=None,
1081
- cutoff_mode="rsum2",
1082
- sweep_sequence="RL",
1083
- normalize=False,
1084
- permute_arrays=True,
1085
- optimize="auto-hq",
1086
- canonize=True,
1087
- sweep_reverse=False,
1088
- equalize_norms=False,
1089
- inplace_fit=False,
1090
- inplace=False,
1091
- progbar=False,
1092
- **compress_opts,
1093
- ):
1094
- """Compress any 1D-like (can have multiple tensors per site) tensor network
1095
- or sum of tensor networks to an exactly 1D (one tensor per site) tensor
1096
- network of bond dimension `max_bond` using the 1-site or 2-site variational
1097
- fitting (or 'DMRG-style') method. The tensor network(s) can have arbitrary
1098
- inner and outer structure.
1099
-
1100
- This method has the lowest scaling of the standard 1D compression methods
1101
- and can also provide the most accurate compression, but the actual speed
1102
- and accuracy depend on the number of iterations required and initial guess,
1103
- making it a more 'hands-on' method.
1104
-
1105
- It's also the only method to support fitting to a sum of tensor networks
1106
- directly, rather than having to form the explicitly summed TN first.
1107
-
1108
- Parameters
1109
- ----------
1110
- tns : TensorNetwork or Sequence[TensorNetwork]
1111
- The tensor network or tensor networks to compress. Each tensor network
1112
- should have the same outer index structure, and within each tensor
1113
- network every tensor should have exactly one of the site tags.
1114
- max_bond : int
1115
- The maximum bond dimension to compress to. If not given, this is set
1116
- as the maximum bond dimension of the initial guess tensor network, if
1117
- any, else infinite for ``bsz=2``.
1118
- cutoff : float, optional
1119
- A dynamic threshold for discarding singular values when compressing.
1120
- This is only relevant for the 2-site sweeping algorithm (``bsz=2``),
1121
- where it defaults to 1e-10.
1122
- tn_fit : TensorNetwork, dict, or str, optional
1123
- An initial guess for the compressed tensor network. It should share
1124
- outer indices and site tags with ``tn``. If a `dict`, this is assumed
1125
- to be options to supply to `tensor_network_1d_compress` to construct
1126
- the initial guess, inheriting various defaults like `initial_bond_dim`.
1127
- If a string, e.g. ``"zipup"``, this is shorthand for that compression
1128
- method with default settings. If not given, a random 1D tensor network
1129
- will be used.
1130
- bsz : {"auto", 1, 2}, optional
1131
- The size of the block to optimize while sweeping. If ``"auto"``, this
1132
- will be inferred from the value of ``max_bond`` and ``cutoff``.
1133
- initial_bond_dim : int, optional
1134
- The initial bond dimension to use when creating the initial guess. This
1135
- is only relevant if ``tn_fit`` is not given. For each sweep the allowed
1136
- bond dimension is doubled, up to ``max_bond``. For 1-site this occurs
1137
- via explicit bond expansion, while for 2-site it occurs during the
1138
- 2-site tensor decomposition.
1139
- max_iterations : int, optional
1140
- The maximum number of variational sweeps to perform.
1141
- tol : float, optional
1142
- The convergence tolerance, in terms of local tensor distance
1143
- normalized. If zero, there will be exactly ``max_iterations`` sweeps.
1144
- site_tags : sequence of str, optional
1145
- The tags to use to group and order the tensors from ``tn``. If not
1146
- given, uses ``tn.site_tags``. The tensor network built will have one
1147
- tensor per site, in the order given by ``site_tags``.
1148
- cutoff_mode : {"rsum2", "rel", ...}, optional
1149
- The mode to use when truncating the singular values of the decomposed
1150
- tensors. See :func:`~quimb.tensor.tensor_split`, if using the 2-site
1151
- sweeping algorithm.
1152
- sweep_sequence : str, optional
1153
- The sequence of sweeps to perform, e.g. ``"LR"`` means first sweep left
1154
- to right, then right to left. The sequence is cycled.
1155
- normalize : bool, optional
1156
- Whether to normalize the final tensor network, making use of the fact
1157
- that the output tensor network is in left or right canonical form.
1158
- permute_arrays : bool or str, optional
1159
- Whether to permute the array indices of the final tensor network into
1160
- canonical order. If ``True`` will use the default order, otherwise if a
1161
- string this specifies a custom order.
1162
- optimize : str, optional
1163
- The contraction path optimizer to use.
1164
- canonize : bool, optional
1165
- Dummy argument to match the signature of other compression methods.
1166
- sweep_reverse : bool, optional
1167
- Whether to sweep in the reverse direction, swapping whether the final
1168
- tensor network is in right or left canonical form, which also depends
1169
- on the last sweep direction.
1170
- equalize_norms : bool or float, optional
1171
- Whether to equalize the norms of the tensors after compression. If an
1172
- explicit value is given, then the norms will be set to that value, and
1173
- the overall scaling factor will be accumulated into `.exponent`.
1174
- inplace_fit : bool, optional
1175
- Whether to perform the compression inplace on the initial guess tensor
1176
- network, ``tn_fit``, if supplied.
1177
- inplace : bool, optional
1178
- Whether to perform the compression inplace on the target tensor network
1179
- supplied, or ``tns[0]`` if a sequence to sum is supplied.
1180
- progbar : bool, optional
1181
- Whether to show a progress bar. Note the progress bar shows the maximum
1182
- change of any single tensor norm, *not* the global change in norm or
1183
- truncation error.
1184
- compress_opts
1185
- Supplied to :func:`~quimb.tensor.tensor_split`, if using the 2-site
1186
- sweeping algorithm.
1187
-
1188
- Returns
1189
- -------
1190
- TensorNetwork
1191
- The compressed tensor network. Depending on ``sweep_reverse`` and the
1192
- last sweep direction, the canonical center will be at either L:
1193
- ``site_tags[0]`` or R: ``site_tags[-1]``, or the opposite if
1194
- ``sweep_reverse``.
1195
- """
1196
- if not canonize:
1197
- warnings.warn("`canonize=False` is ignored for the `fit` method.")
1198
-
1199
- if isinstance(tns, TensorNetwork):
1200
- # fit to single tensor network
1201
- tns = (tns,)
1202
- else:
1203
- # fit to sum of tensor networks
1204
- tns = tuple(tns)
1205
-
1206
- # how to partition the tensor network(s)
1207
- if site_tags is None:
1208
- site_tags = next(
1209
- tn.site_tags for tn in tns if hasattr(tn, "site_tags")
1210
- )
1211
-
1212
- tns = tuple(
1213
- enforce_1d_like(tn, site_tags=site_tags, inplace=inplace) for tn in tns
1214
- )
1215
-
1216
- # choose the block size of the sweeping function
1217
- if bsz == "auto":
1218
- if max_bond is not None:
1219
- if (cutoff is None) or (cutoff == 0.0):
1220
- # max_bond specified, no cutoff -> 1-site
1221
- bsz = 1
1222
- else:
1223
- # max_bond and cutoff specified -> 2-site
1224
- bsz = 2
1225
- else:
1226
- if cutoff == 0.0:
1227
- # no max_bond or cutoff -> 1-site
1228
- bsz = 1
1229
- else:
1230
- # no max_bond, but cutoff -> 2-site
1231
- bsz = 2
1232
- f_sweep = {
1233
- 1: _tn1d_fit_sum_sweep_1site,
1234
- 2: _tn1d_fit_sum_sweep_2site,
1235
- }[bsz]
1236
-
1237
- if cutoff is None:
1238
- # set default cutoff
1239
- cutoff = 1e-10 if bsz == 2 else 0.0
1240
-
1241
- if bsz == 2:
1242
- compress_opts["cutoff_mode"] = cutoff_mode
1243
-
1244
- # choose our initial guess
1245
- if not isinstance(tn_fit, TensorNetwork):
1246
- if max_bond is None:
1247
- if bsz == 1:
1248
- raise ValueError(
1249
- "Need to specify at least one of `max_bond` "
1250
- "or `tn_fit` when using 1-site sweeping."
1251
- )
1252
- max_bond = float("inf")
1253
- current_bond_dim = initial_bond_dim
1254
- else:
1255
- # don't start larger than the target bond dimension
1256
- current_bond_dim = min(initial_bond_dim, max_bond)
1257
-
1258
- if tn_fit is None:
1259
- # random initial guess
1260
- tn_fit = TN_matching(
1261
- tns[0], max_bond=current_bond_dim, site_tags=site_tags
1262
- )
1263
- else:
1264
- if isinstance(tn_fit, str):
1265
- tn_fit = {"method": tn_fit}
1266
- tn_fit.setdefault("max_bond", current_bond_dim)
1267
- tn_fit.setdefault("cutoff", cutoff)
1268
- tn_fit.setdefault("site_tags", site_tags)
1269
- tn_fit.setdefault("optimize", optimize)
1270
- tn_fit = tensor_network_1d_compress(tns[0], **tn_fit)
1271
- inplace_fit = True
1272
- else:
1273
- # a guess was supplied
1274
- current_bond_dim = tn_fit.max_bond()
1275
- if max_bond is None:
1276
- # assume we want to limit bond dimension to the initial guess
1277
- max_bond = current_bond_dim
1278
-
1279
- # choose to conjugate the smaller fitting network
1280
- tn_fit = tn_fit.conj(inplace=inplace_fit)
1281
- tn_fit.add_tag("__FIT__")
1282
- # note these are all views of `tn_fit` and thus will update as it does
1283
- tn_overlaps = [(tn_fit | tn) for tn in tns]
1284
-
1285
- if any(tn_overlap.outer_inds() for tn_overlap in tn_overlaps):
1286
- raise ValueError(
1287
- "The outer indices of one or more of "
1288
- "`tns` and `tn_fit` don't seem to match."
1289
- )
1290
-
1291
- sweeps = itertools.cycle(sweep_sequence)
1292
- if max_iterations is None:
1293
- its = itertools.count()
1294
- else:
1295
- its = range(max_iterations)
1296
-
1297
- envs = {}
1298
- old_direction = ""
1299
-
1300
- if progbar:
1301
- from quimb.utils import progbar as ProgBar
1302
-
1303
- its = ProgBar(its, total=max_iterations)
1304
-
1305
- # whether to compute the maximum change in tensor norm
1306
- compute_tdiff = (tol != 0.0) or progbar
1307
-
1308
- try:
1309
- for i in its:
1310
- next_direction = next(sweeps)
1311
- reverse = {"R": False, "L": True}[next_direction]
1312
- if sweep_reverse:
1313
- reverse = not reverse
1314
-
1315
- if current_bond_dim < max_bond:
1316
- # double bond dimension, up to max_bond
1317
- current_bond_dim = min(2 * current_bond_dim, max_bond)
1318
-
1319
- # perform a single sweep
1320
- max_tdiff = f_sweep(
1321
- tn_fit,
1322
- tn_overlaps,
1323
- max_bond=current_bond_dim,
1324
- cutoff=cutoff,
1325
- envs=envs,
1326
- prepare=(i == 0) or (next_direction == old_direction),
1327
- site_tags=site_tags,
1328
- reverse=reverse,
1329
- optimize=optimize,
1330
- compute_tdiff=compute_tdiff,
1331
- **compress_opts,
1332
- )
1333
-
1334
- if progbar:
1335
- its.set_description(f"max_tdiff={max_tdiff:.2e}")
1336
- if tol != 0.0 and max_tdiff < tol:
1337
- # converged
1338
- break
1339
-
1340
- old_direction = next_direction
1341
- except KeyboardInterrupt:
1342
- pass
1343
- finally:
1344
- if progbar:
1345
- its.close()
1346
-
1347
- tn_fit.drop_tags("__FIT__")
1348
- tn_fit.conj_()
1349
-
1350
- if normalize:
1351
- if reverse:
1352
- tn_fit[site_tags[0]].normalize_()
1353
- else:
1354
- tn_fit[site_tags[-1]].normalize_()
1355
-
1356
- if inplace:
1357
- tn0 = tns[0]
1358
- tn0.remove_all_tensors()
1359
- tn0.add_tensor_network(
1360
- tn_fit, virtual=not inplace_fit, check_collisions=False
1361
- )
1362
- tn_fit = tn0
1363
-
1364
- # possibly put the array indices in canonical order (e.g. when MPS or MPO)
1365
- possibly_permute_(tn_fit, permute_arrays)
1366
-
1367
- # XXX: do better than simply waiting til the end to equalize norms
1368
- if equalize_norms is True:
1369
- tn_fit.equalize_norms_()
1370
- elif equalize_norms:
1371
- tn_fit.equalize_norms_(value=equalize_norms)
1372
-
1373
- return tn_fit
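A sketch of the sum-fitting feature highlighted above, assuming upstream `quimb`; the two random MPS and all sizes are invented. With `max_bond` given and no `cutoff`, `bsz` is inferred as 1, and the fitted MPS approximates |a> + |b> without the summed network ever being formed explicitly:

    import quimb.tensor as qtn
    from quimb.tensor.tensor_1d_compress import tensor_network_1d_compress_fit

    a = qtn.MPS_rand_state(20, 8)
    b = qtn.MPS_rand_state(20, 8)

    # 1-site sweeps, at most 20 of them, stopping early once tensors stop changing
    phi = tensor_network_1d_compress_fit(
        [a, b], max_bond=12, max_iterations=20, tol=1e-8
    )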
1374
-
1375
-
1376
- def tensor_network_1d_compress_fit_guess(
1377
- tn,
1378
- guess,
1379
- max_bond=None,
1380
- cutoff=1e-10,
1381
- cutoff_fit=0.0,
1382
- bsz=1,
1383
- max_iterations=8,
1384
- canonize=True,
1385
- **kwargs,
1386
- ):
1387
- """Compress any 1D-like (can have multiple tensors per site) tensor network
1388
- to an exactly 1D (one tensor per site) tensor network of bond dimension
1389
- `max_bond` using, by default, the 1-site variational fitting (or 'DMRG-style')
1390
- method, starting with a non-random guess tensor network, e.g. from the cheap
1391
- zip-up or projector methods.
1392
- """
1393
- tn_fit = {
1394
- "method": guess,
1395
- # use cutoff in guess, but not in fitting
1396
- "cutoff": cutoff,
1397
- "canonize": canonize,
1398
- }
1399
-
1400
- return tensor_network_1d_compress_fit(
1401
- tn,
1402
- max_bond=max_bond,
1403
- cutoff=cutoff_fit,
1404
- tn_fit=tn_fit,
1405
- bsz=bsz,
1406
- max_iterations=max_iterations,
1407
- inplace_fit=True,
1408
- **kwargs,
1409
- )
1410
-
1411
-
1412
- tensor_network_1d_compress_fit_zipup = functools.partial(
1413
- tensor_network_1d_compress_fit_guess, guess="zipup"
1414
- )
1415
- tensor_network_1d_compress_fit_projector = functools.partial(
1416
- tensor_network_1d_compress_fit_guess, guess="projector"
1417
- )
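A sketch of how these aliases tie the pieces together, reusing the illustrative MPO-MPS product from earlier (same assumptions): the zip-up pass builds the initial guess, which up to eight 1-site fitting sweeps (the defaults above) then refine towards the target bond dimension.

    import quimb.tensor as qtn
    from quimb.tensor.tensor_1d_compress import tensor_network_1d_compress_fit_zipup

    mps = qtn.MPS_rand_state(20, 16)
    mpo = qtn.MPO_rand(20, 8)

    out = tensor_network_1d_compress_fit_zipup(
        mpo | mps, max_bond=32, site_tags=mps.site_tags
    )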
1418
-
1419
-
- _TN1D_COMPRESS_METHODS = {
-     "direct": tensor_network_1d_compress_direct,
-     "dm": tensor_network_1d_compress_dm,
-     "zipup": tensor_network_1d_compress_zipup,
-     "zipup-first": tensor_network_1d_compress_zipup_first,
-     "fit": tensor_network_1d_compress_fit,
-     "fit-zipup": tensor_network_1d_compress_fit_zipup,
-     "fit-projector": tensor_network_1d_compress_fit_projector,
- }
- 
- 
- def tensor_network_1d_compress(
-     tn,
-     max_bond=None,
-     cutoff=1e-10,
-     method="dm",
-     site_tags=None,
-     canonize=True,
-     permute_arrays=True,
-     optimize="auto-hq",
-     sweep_reverse=False,
-     equalize_norms=False,
-     compress_opts=None,
-     inplace=False,
-     **kwargs,
- ):
-     """Compress a 1D-like tensor network using the specified method.
- 
-     Parameters
-     ----------
-     tn : TensorNetwork
-         The tensor network to compress. Every tensor should have exactly one of
-         the site tags. Each site can have multiple tensors and output indices.
-     max_bond : int
-         The maximum bond dimension to compress to.
-     cutoff : float, optional
-         A dynamic threshold for discarding singular values when compressing.
-     method : {"direct", "dm", "zipup", "zipup-first", "fit", "projector", ...}
-         The compression method to use.
-     site_tags : sequence of str, optional
-         The tags to use to group and order the tensors from ``tn``. If not
-         given, uses ``tn.site_tags``. The tensor network built will have one
-         tensor per site, in the order given by ``site_tags``.
-     canonize : bool, optional
-         Whether to perform canonicalization, pseudo or otherwise depending on
-         the method, before compressing. Ignored for ``method='dm'`` and
-         ``method='fit'``.
-     permute_arrays : bool or str, optional
-         Whether to permute the array indices of the final tensor network into
-         canonical order. If ``True`` will use the default order, otherwise if a
-         string this specifies a custom order.
-     optimize : str, optional
-         The contraction path optimizer to use.
-     sweep_reverse : bool, optional
-         Whether to sweep in the reverse direction, resulting in a left
-         canonical form instead of right canonical (for the fit method, this
-         also depends on the last sweep direction).
-     equalize_norms : bool or float, optional
-         Whether to equalize the norms of the tensors after compression. If an
-         explicit value is given, then the norms will be set to that value, and
-         the overall scaling factor will be accumulated into `.exponent`.
-     inplace : bool, optional
-         Whether to perform the compression inplace.
-     kwargs
-         Supplied to the chosen compression method.
- 
-     Returns
-     -------
-     TensorNetwork
-     """
-     compress_opts = compress_opts or {}
- 
-     f_tn1d = _TN1D_COMPRESS_METHODS.get(method, None)
-     if f_tn1d is not None:
-         # 1D specific compression methods
-         return f_tn1d(
-             tn,
-             max_bond=max_bond,
-             cutoff=cutoff,
-             site_tags=site_tags,
-             canonize=canonize,
-             permute_arrays=permute_arrays,
-             optimize=optimize,
-             sweep_reverse=sweep_reverse,
-             equalize_norms=equalize_norms,
-             inplace=inplace,
-             **compress_opts,
-             **kwargs,
-         )
- 
-     # generic tensor network compression methods
-     if sweep_reverse:
-         warnings.warn(
-             "sweep_reverse has no effect for arbitrary geometry (AG) methods."
-         )
- 
-     tnc = tensor_network_ag_compress(
-         tn,
-         max_bond=max_bond,
-         cutoff=cutoff,
-         method=method,
-         site_tags=site_tags,
-         canonize=canonize,
-         optimize=optimize,
-         equalize_norms=equalize_norms,
-         inplace=inplace,
-         **compress_opts,
-         **kwargs,
-     )
- 
-     if permute_arrays:
-         possibly_permute_(tnc, permute_arrays)
- 
-     return tnc
- 
- 
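``tensor_network_1d_compress`` is a thin dispatcher: methods found in ``_TN1D_COMPRESS_METHODS`` use the 1D-specific routines, and anything else falls through to ``tensor_network_ag_compress``. A hedged usage sketch under the same assumption as above (upstream quimb import path, illustrative random tensors):

import quimb.tensor as qtn
from quimb.tensor.tensor_1d_compress import (
    mps_gate_with_mpo_lazy,
    tensor_network_1d_compress,
)

mps = qtn.MPS_rand_state(12, bond_dim=8)
mpo = qtn.MPO_rand(12, bond_dim=4)
tn = mps_gate_with_mpo_lazy(mps, mpo)  # two tensors per site, uncompressed

# density matrix method (the default): one tensor per site afterwards
psi_dm = tensor_network_1d_compress(tn, max_bond=8, method="dm")

# DMRG-style fitting seeded by a cheap zip-up guess
psi_fit = tensor_network_1d_compress(tn, max_bond=8, method="fit-zipup")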
- # --------------- MPO-MPS gating using 1D compression methods --------------- #
- 
- 
- def mps_gate_with_mpo_lazy(mps, mpo, inplace=False):
-     """Apply an MPO to an MPS lazily, i.e. nothing is contracted, but the new
-     TN object has the same outer indices as the original MPS.
-     """
-     return tensor_network_apply_op_vec(
-         A=mpo, x=mps, contract=False, inplace=inplace
-     )
- 
- 
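``mps_gate_with_mpo_lazy`` only rewires indices: the returned network still holds one MPS tensor and one MPO tensor per site, but exposes the same outer index names as the original MPS, which is what lets the 1D compression routines treat it as a "1D-like" input. A small hedged check of those two properties (upstream quimb import path assumed):

import quimb.tensor as qtn
from quimb.tensor.tensor_1d_compress import mps_gate_with_mpo_lazy

mps = qtn.MPS_rand_state(6, bond_dim=4)
mpo = qtn.MPO_rand(6, bond_dim=3)

tn = mps_gate_with_mpo_lazy(mps, mpo)

# nothing contracted yet: one MPS tensor plus one MPO tensor per site
assert tn.num_tensors == 2 * mps.L

# but the dangling (physical) indices are still named like the MPS ones
assert set(tn.outer_inds()) == set(mps.outer_inds())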
1548
- def mps_gate_with_mpo_direct(
1549
- mps,
1550
- mpo,
1551
- max_bond=None,
1552
- cutoff=1e-10,
1553
- inplace=False,
1554
- **compress_opts,
1555
- ):
1556
- """Apply an MPO to an MPS using the boundary compression method, that is,
1557
- explicitly contracting site-wise to form a MPS-like TN, canonicalizing in
1558
- one direction, then compressing in the other. This has the same scaling as
1559
- the density matrix (dm) method, but a larger prefactor. It can still be
1560
- faster for small bond dimensions however, and is potentially higher
1561
- precision since it works in the space of singular values directly rather
1562
- than singular values squared. It is not quite optimal in terms of error due
1563
- to the compounding errors of the SVDs.
1564
-
1565
- Parameters
1566
- ----------
1567
- mps : MatrixProductState
1568
- The MPS to gate.
1569
- mpo : MatrixProductOperator
1570
- The MPO to gate with.
1571
- max_bond : int
1572
- The maximum bond dimension to compress to.
1573
- cutoff : float, optional
1574
- A dynamic threshold for discarding singular values when compressing.
1575
- compress_opts
1576
- Supplied to :func:`~quimb.tensor.tensor_split`.
1577
- """
1578
- # form the double layer tensor network
1579
- tn = mps_gate_with_mpo_lazy(mps, mpo, inplace=inplace)
1580
-
1581
- # directly compress it without first contracting site-wise
1582
- return tensor_network_1d_compress_direct(
1583
- tn,
1584
- max_bond=max_bond,
1585
- cutoff=cutoff,
1586
- inplace=inplace,
1587
- **compress_opts,
1588
- )
1589
-
1590
-
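The "direct" (boundary) route described above is the textbook two-pass compression: a QR/LQ sweep in one direction to canonicalize the chain, then an SVD sweep back that truncates each bond. A self-contained numpy sketch of that technique on a list of site arrays shaped ``(left_bond, phys, right_bond)``; it is a generic illustration with a fixed ``max_bond`` only (no ``cutoff``), not the quimb implementation:

import numpy as np


def compress_chain(arrays, max_bond):
    """Two-pass compression of site arrays shaped (Dl, d, Dr): LQ sweep
    right-to-left to right-canonicalize, then SVD sweep left-to-right
    truncating each bond to at most max_bond."""
    arrs = [a.copy() for a in arrays]
    n = len(arrs)

    # pass 1: make sites 1..n-1 right-isometric, pushing all weight leftwards
    for i in range(n - 1, 0, -1):
        Dl, d, Dr = arrs[i].shape
        q, r = np.linalg.qr(arrs[i].reshape(Dl, d * Dr).T)  # LQ via QR of the transpose
        k = q.shape[1]
        arrs[i] = q.T.reshape(k, d, Dr)
        arrs[i - 1] = np.einsum("ldr,rk->ldk", arrs[i - 1], r.T)

    # pass 2: truncate every bond with an SVD, sweeping back left-to-right
    for i in range(n - 1):
        Dl, d, Dr = arrs[i].shape
        u, s, vh = np.linalg.svd(arrs[i].reshape(Dl * d, Dr), full_matrices=False)
        k = min(max_bond, s.size)
        arrs[i] = u[:, :k].reshape(Dl, d, k)
        carry = s[:k, None] * vh[:k]  # (k, Dr) factor absorbed into the next site
        arrs[i + 1] = np.einsum("kr,rds->kds", carry, arrs[i + 1])

    return arrs


# toy usage: squeeze a random bond-8 chain of 6 sites down to bond 4
rng = np.random.default_rng(0)
shapes = [(1, 2, 8)] + [(8, 2, 8)] * 4 + [(8, 2, 1)]
out = compress_chain([rng.normal(size=s) for s in shapes], max_bond=4)
assert max(a.shape[2] for a in out[:-1]) <= 4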
1591
- def mps_gate_with_mpo_dm(
1592
- mps,
1593
- mpo,
1594
- max_bond=None,
1595
- cutoff=1e-10,
1596
- inplace=False,
1597
- **compress_opts,
1598
- ):
1599
- """Gate this MPS with an MPO, using the density matrix compression method.
1600
-
1601
- Parameters
1602
- ----------
1603
- mps : MatrixProductState
1604
- The MPS to gate.
1605
- mpo : MatrixProductOperator
1606
- The MPO to gate with.
1607
- max_bond : int, optional
1608
- The maximum bond dimension to keep when compressing the double layer
1609
- tensor network, if any.
1610
- cutoff : float, optional
1611
- The truncation error to use when compressing the double layer tensor
1612
- network, if any.
1613
- compress_opts
1614
- Supplied to :func:`~quimb.tensor.tensor_split`.
1615
- """
1616
- # form the double layer tensor network
1617
- tn = mps_gate_with_mpo_lazy(mps, mpo, inplace=inplace)
1618
-
1619
- # directly compress it without first contracting site-wise
1620
- return tensor_network_1d_compress_dm(
1621
- tn, max_bond, cutoff, inplace=inplace, **compress_opts
1622
- )
1623
-
1624
-
1625
- def mps_gate_with_mpo_zipup(
1626
- mps,
1627
- mpo,
1628
- max_bond=None,
1629
- cutoff=1e-10,
1630
- canonize=True,
1631
- optimize="auto-hq",
1632
- **compress_opts,
1633
- ):
1634
- """Apply an MPO to an MPS using the 'zip-up' algorithm due to
1635
- 'Minimally Entangled Typical Thermal State Algorithms', E.M. Stoudenmire &
1636
- Steven R. White (https://arxiv.org/abs/1002.1305).
1637
-
1638
- Parameters
1639
- ----------
1640
- mps : MatrixProductState
1641
- The MPS to gate.
1642
- mpo : MatrixProductOperator
1643
- The MPO to gate with.
1644
- max_bond : int
1645
- The maximum bond dimension to compress to.
1646
- cutoff : float, optional
1647
- A dynamic threshold for discarding singular values when compressing.
1648
- site_tags : sequence of str, optional
1649
- The tags to use to group and order the tensors from ``tn``. If not
1650
- given, uses ``tn.site_tags``. The tensor network built will have one
1651
- tensor per site, in the order given by ``site_tags``.
1652
- canonize : bool, optional
1653
- Whether to pseudo canonicalize the initial tensor network.
1654
- normalize : bool, optional
1655
- Whether to normalize the final tensor network, making use of the fact
1656
- that the output tensor network is in right canonical form.
1657
- permute_arrays : bool or str, optional
1658
- Whether to permute the array indices of the final tensor network into
1659
- canonical order. If ``True`` will use the default order, otherwise if a
1660
- string this specifies a custom order.
1661
- optimize : str, optional
1662
- The contraction path optimizer to use.
1663
- compress_opts
1664
- Supplied to :func:`~quimb.tensor.tensor_split`.
1665
-
1666
- Returns
1667
- -------
1668
- MatrixProductState
1669
- The compressed MPS, in right canonical form.
1670
- """
1671
- # form double layer
1672
- tn = mps_gate_with_mpo_lazy(mps, mpo)
1673
-
1674
- # compress it using zip-up
1675
- return tensor_network_1d_compress_zipup(
1676
- tn,
1677
- max_bond=max_bond,
1678
- cutoff=cutoff,
1679
- canonize=canonize,
1680
- optimize=optimize,
1681
- **compress_opts,
1682
- )
1683
-
1684
-
1685
- def mps_gate_with_mpo_zipup_first(
1686
- mps,
1687
- mpo,
1688
- max_bond=None,
1689
- max_bond_zipup=None,
1690
- cutoff=1e-10,
1691
- cutoff_zipup=None,
1692
- canonize=True,
1693
- optimize="auto-hq",
1694
- **compress_opts,
1695
- ):
1696
- """Apply an MPO to an MPS by first using the zip-up method with a larger
1697
- bond dimension, then doing a regular compression sweep to the target final
1698
- bond dimension. This avoids forming an intermediate MPS with bond dimension
1699
- ``mps.max_bond() * mpo.max_bond()``.
1700
-
1701
- Parameters
1702
- ----------
1703
- mps : MatrixProductState
1704
- The MPS to gate.
1705
- mpo : MatrixProductOperator
1706
- The MPO to gate with.
1707
- max_bond : int
1708
- The target final bond dimension.
1709
- max_bond_zipup : int, optional
1710
- The maximum bond dimension to use when zip-up compressing the double
1711
- layer tensor network. If not given, defaults to ``2 * max_bond``.
1712
- Needs to be smaller than ``mpo.max_bond()`` for any savings.
1713
- cutoff : float, optional
1714
- The truncation error to use when performing the final regular
1715
- compression sweep.
1716
- cutoff_zipup : float, optional
1717
- The truncation error to use when performing the zip-up compression.
1718
- canonize : bool, optional
1719
- Whether to pseudo canonicalize the initial tensor network.
1720
- optimize : str, optional
1721
- The contraction path optimizer to use.
1722
- compress_opts
1723
- Supplied to :func:`~quimb.tensor.tensor_split` (both the zip-up and
1724
- final sweep).
1725
-
1726
- Returns
1727
- -------
1728
- MatrixProductState
1729
- The compressed MPS, in right canonical form.
1730
- """
1731
- new = mps_gate_with_mpo_lazy(mps, mpo)
1732
- return tensor_network_1d_compress_zipup_first(
1733
- new,
1734
- max_bond=max_bond,
1735
- max_bond_zipup=max_bond_zipup,
1736
- cutoff=cutoff,
1737
- cutoff_zipup=cutoff_zipup,
1738
- canonize=canonize,
1739
- optimize=optimize,
1740
- **compress_opts,
1741
- )
1742
-
1743
-
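The saving promised by the docstring is in the size of the intermediate: a naive contract-then-compress pass has to handle a bond of ``mps.max_bond() * mpo.max_bond()``, while the staged route never holds more than ``max_bond_zipup`` (``2 * max_bond`` by default) before the final sweep. A small illustrative helper for that bookkeeping (the function name and numbers are just an example, not part of the API):

def zipup_first_savings(chi_mps, chi_mpo, max_bond, max_bond_zipup=None):
    """Compare the largest intermediate bond of the naive route with the
    staged zip-up-first route."""
    if max_bond_zipup is None:
        max_bond_zipup = 2 * max_bond       # the default used above
    naive_intermediate = chi_mps * chi_mpo  # full MPO|MPS> double-layer bond
    staged_intermediate = max_bond_zipup    # all the first stage ever holds
    return naive_intermediate, staged_intermediate


# e.g. a bond-64 MPS gated by a bond-8 MPO, compressed back to bond 64:
# the naive intermediate bond is 512, zip-up-first only ever holds 128
print(zipup_first_savings(chi_mps=64, chi_mpo=8, max_bond=64))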
1744
- def mps_gate_with_mpo_fit(mps, mpo, max_bond, **kwargs):
1745
- """Gate an MPS with an MPO using the variational fitting or DMRG-style
1746
- method.
1747
-
1748
- Parameters
1749
- ----------
1750
- mps : MatrixProductState
1751
- The MPS to gate.
1752
- mpo : MatrixProductOperator
1753
- The MPO to gate with.
1754
- max_bond : int
1755
- The maximum bond dimension to compress to.
1756
-
1757
- Returns
1758
- -------
1759
- MatrixProductState
1760
- The gated MPS.
1761
- """
1762
- tn = mps_gate_with_mpo_lazy(mps, mpo)
1763
- return tensor_network_1d_compress_fit(tn, max_bond, **kwargs)
1764
-
1765
-
1766
- def mps_gate_with_mpo_autofit(
1767
- self,
1768
- mpo,
1769
- max_bond,
1770
- cutoff=0.0,
1771
- init_guess=None,
1772
- **fit_opts,
1773
- ):
1774
- """Fit a MPS to a MPO applied to an MPS using geometry generic versions
1775
- of either ALS or autodiff. This is usually much less efficient that using
1776
- the 1D specific methods.
1777
-
1778
- Some nice alternatives to the default fit_opts:
1779
-
1780
- - method="autodiff"
1781
- - method="als", solver="lstsq"
1782
-
1783
- """
1784
- if cutoff != 0.0:
1785
- raise ValueError("cutoff must be zero for fitting")
1786
-
1787
- target = mps_gate_with_mpo_lazy(self, mpo)
1788
-
1789
- if init_guess is None:
1790
- ansatz = self.copy()
1791
- ansatz.expand_bond_dimension_(max_bond)
1792
- else:
1793
- raise NotImplementedError
1794
-
1795
- return ansatz.fit_(target, **fit_opts)
1796
-
1797
-
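The ``fit_opts`` are forwarded straight to ``ansatz.fit_``, so switching between the alternatives listed in the docstring is only a matter of keyword arguments. A hedged usage sketch (upstream quimb import path assumed; the autodiff route additionally needs an autodiff backend installed):

import quimb.tensor as qtn
from quimb.tensor.tensor_1d_compress import mps_gate_with_mpo_autofit

mps = qtn.MPS_rand_state(10, bond_dim=8)
mpo = qtn.MPO_rand(10, bond_dim=4)

# geometry-generic ALS with a least-squares local solver, as suggested above
psi_als = mps_gate_with_mpo_autofit(
    mps, mpo, max_bond=8, method="als", solver="lstsq"
)

# or global gradient-based fitting (requires an autodiff backend)
psi_ad = mps_gate_with_mpo_autofit(mps, mpo, max_bond=8, method="autodiff")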
1798
- def mps_gate_with_mpo_projector(
1799
- self,
1800
- mpo,
1801
- max_bond,
1802
- cutoff=1e-10,
1803
- canonize=True,
1804
- canonize_opts=None,
1805
- inplace=False,
1806
- **compress_opts,
1807
- ):
1808
- """Apply an MPO to an MPS using local projectors, in the style of CTMRG
1809
- or HOTRG, without using information beyond the neighboring 4 tensors.
1810
- """
1811
- tn = mps_gate_with_mpo_lazy(self, mpo)
1812
-
1813
- if canonize:
1814
- # precondition
1815
- canonize_opts = ensure_dict(canonize_opts)
1816
- tn.gauge_all_(**canonize_opts)
1817
-
1818
- tn_calc = tn.copy()
1819
-
1820
- for i in range(tn.L - 1):
1821
- ltags = (tn.site_tag(i),)
1822
- rtags = (tn.site_tag(i + 1),)
1823
-
1824
- tn_calc.insert_compressor_between_regions_(
1825
- ltags,
1826
- rtags,
1827
- new_ltags=ltags,
1828
- new_rtags=rtags,
1829
- max_bond=max_bond,
1830
- cutoff=cutoff,
1831
- insert_into=tn,
1832
- bond_ind=self.bond(i, i + 1),
1833
- **compress_opts,
1834
- )
1835
-
1836
- if inplace:
1837
- for i in range(tn.L):
1838
- ti = self[i]
1839
- data = tensor_contract(
1840
- *tn[i], output_inds=ti.inds, optimize="auto-hq"
1841
- ).data
1842
- ti.modify(data=data)
1843
-
1844
- else:
1845
- for i in range(tn.L):
1846
- tn.contract_tags_(
1847
- tn.site_tag(i),
1848
- output_inds=self[i].inds,
1849
- optimize="auto-hq",
1850
- )
1851
-
1852
- tn.view_like_(self)
1853
-
1854
- return tn
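``insert_compressor_between_regions_`` inserts the kind of oblique projector pair used in CTMRG/HOTRG: factor the two neighbouring regions into boundary matrices ``R_l`` and ``R_r``, SVD their product, and split the truncated inverse square root of the singular values between the two sides. A self-contained numpy sketch of that generic construction (not the quimb internals), with a check that inserting the pair on the shared bond reproduces the rank-``k`` truncation of ``R_l @ R_r``:

import numpy as np


def local_projector_pair(R_l, R_r, max_bond):
    """Oblique projectors P_l (k, m) and P_r (m, k) for boundary factors
    R_l (a, m) and R_r (m, b) sharing a bond of size m, such that
    R_l @ P_r @ P_l @ R_r equals the best rank-k approximation of R_l @ R_r."""
    u, s, vh = np.linalg.svd(R_l @ R_r, full_matrices=False)
    k = min(max_bond, s.size)
    s_isqrt = 1.0 / np.sqrt(s[:k])
    P_r = (R_r @ vh[:k].conj().T) * s_isqrt              # (m, k)
    P_l = (s_isqrt[:, None] * u[:, :k].conj().T) @ R_l   # (k, m)
    return P_l, P_r


rng = np.random.default_rng(1)
R_l = rng.normal(size=(6, 10))   # left region's boundary factor
R_r = rng.normal(size=(10, 7))   # right region's boundary factor

P_l, P_r = local_projector_pair(R_l, R_r, max_bond=4)

# inserting P_r @ P_l on the shared bond reproduces the rank-4 truncation
u, s, vh = np.linalg.svd(R_l @ R_r, full_matrices=False)
truncated = (u[:, :4] * s[:4]) @ vh[:4]
assert np.allclose(R_l @ P_r @ P_l @ R_r, truncated)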