Trajectree 0.0.1-py3-none-any.whl → 0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. trajectree/__init__.py +0 -3
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +9 -9
  5. trajectree/fock_optics/outputs.py +10 -6
  6. trajectree/fock_optics/utils.py +9 -6
  7. trajectree/sequence/swap.py +5 -4
  8. trajectree/trajectory.py +5 -4
  9. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/METADATA +2 -3
  10. trajectree-0.0.3.dist-info/RECORD +16 -0
  11. trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
  12. trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
  13. trajectree/quimb/docs/conf.py +0 -158
  14. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
  15. trajectree/quimb/quimb/__init__.py +0 -507
  16. trajectree/quimb/quimb/calc.py +0 -1491
  17. trajectree/quimb/quimb/core.py +0 -2279
  18. trajectree/quimb/quimb/evo.py +0 -712
  19. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  20. trajectree/quimb/quimb/experimental/autojittn.py +0 -129
  21. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
  22. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
  23. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
  24. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
  25. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
  26. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
  27. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
  28. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
  29. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
  30. trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
  31. trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
  32. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
  33. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
  34. trajectree/quimb/quimb/experimental/schematic.py +0 -7
  35. trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
  36. trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
  37. trajectree/quimb/quimb/gates.py +0 -36
  38. trajectree/quimb/quimb/gen/__init__.py +0 -2
  39. trajectree/quimb/quimb/gen/operators.py +0 -1167
  40. trajectree/quimb/quimb/gen/rand.py +0 -713
  41. trajectree/quimb/quimb/gen/states.py +0 -479
  42. trajectree/quimb/quimb/linalg/__init__.py +0 -6
  43. trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
  44. trajectree/quimb/quimb/linalg/autoblock.py +0 -258
  45. trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
  46. trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
  47. trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
  48. trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
  49. trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
  50. trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
  51. trajectree/quimb/quimb/schematic.py +0 -1518
  52. trajectree/quimb/quimb/tensor/__init__.py +0 -401
  53. trajectree/quimb/quimb/tensor/array_ops.py +0 -610
  54. trajectree/quimb/quimb/tensor/circuit.py +0 -4824
  55. trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
  56. trajectree/quimb/quimb/tensor/contraction.py +0 -336
  57. trajectree/quimb/quimb/tensor/decomp.py +0 -1255
  58. trajectree/quimb/quimb/tensor/drawing.py +0 -1646
  59. trajectree/quimb/quimb/tensor/fitting.py +0 -385
  60. trajectree/quimb/quimb/tensor/geometry.py +0 -583
  61. trajectree/quimb/quimb/tensor/interface.py +0 -114
  62. trajectree/quimb/quimb/tensor/networking.py +0 -1058
  63. trajectree/quimb/quimb/tensor/optimize.py +0 -1818
  64. trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
  65. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
  66. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
  67. trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
  68. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
  69. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
  70. trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
  71. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
  72. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
  73. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
  74. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
  75. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
  76. trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
  77. trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
  78. trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
  79. trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
  80. trajectree/quimb/quimb/utils.py +0 -892
  81. trajectree/quimb/tests/__init__.py +0 -0
  82. trajectree/quimb/tests/test_accel.py +0 -501
  83. trajectree/quimb/tests/test_calc.py +0 -788
  84. trajectree/quimb/tests/test_core.py +0 -847
  85. trajectree/quimb/tests/test_evo.py +0 -565
  86. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  87. trajectree/quimb/tests/test_gen/test_operators.py +0 -361
  88. trajectree/quimb/tests/test_gen/test_rand.py +0 -296
  89. trajectree/quimb/tests/test_gen/test_states.py +0 -261
  90. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  91. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
  92. trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
  93. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
  94. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
  95. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
  96. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
  97. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
  100. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
  101. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
  102. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
  103. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
  104. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
  105. trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
  106. trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
  107. trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
  108. trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
  109. trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
  110. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
  111. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
  112. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
  113. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
  114. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
  115. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
  116. trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
  117. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
  118. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
  119. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
  120. trajectree/quimb/tests/test_utils.py +0 -85
  121. trajectree-0.0.1.dist-info/RECORD +0 -126
  122. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/WHEEL +0 -0
  123. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/licenses/LICENSE +0 -0
  124. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/top_level.txt +0 -0
trajectree/quimb/quimb/experimental/merabuilder.py
@@ -1,865 +0,0 @@
1
- """Tools for constructing MERA for arbitrary geometry.
2
-
3
- TODO::
4
-
5
- - [ ] 2D, 3D MERA classes
6
- - [ ] general strategies for arbitrary geometries
7
- - [ ] layer_tag? and handling of other attributes
8
- - [ ] handle dangling case
9
- - [ ] invariant generators?
10
-
11
- DONE::
12
-
13
- - [x] layer_gate methods for arbitrary geometry
14
- - [x] 1D: generic way to handle finite and open boundary conditions
15
- - [x] hook into other arbgeom infrastructure for computing rdms etc
16
-
17
- """
18
- import itertools
19
- import functools
20
- from quimb.tensor.tensor_core import (
21
- Tensor,
22
- IsoTensor,
23
- oset_union,
24
- prod,
25
- )
26
- from quimb.tensor.tensor_arbgeom import (
27
- TensorNetworkGenVector,
28
- oset,
29
- tags_to_oset,
30
- rand_uuid,
31
- _compute_expecs_maybe_in_parallel,
32
- _tn_local_expectation,
33
- )
34
- from quimb.tensor.tensor_1d import TensorNetwork1DVector
35
- from quimb.utils import partition
36
-
37
-
38
- class TensorNetworkGenIso(TensorNetworkGenVector):
39
- """A class for building generic 'isometric' or MERA like tensor network
40
- states with arbitrary geometry. After supplying the underlying `sites` of
41
- the problem - which can be an arbitrary sequence of hashable objects - one
42
- places either unitaries, isometries or tree tensors layered above groups of
43
- sites. The isometric and tree tensors effectively coarse grain blocks into
44
- a single new site, and the unitaries generally 'disentangle' between
45
- blocks.
46
- """
47
-
48
- _EXTRA_PROPS = (
49
- "_site_tag_id",
50
- "_sites",
51
- "_site_ind_id",
52
- "_layer_ind_id",
53
- )
54
-
55
- @classmethod
56
- def empty(
57
- cls,
58
- sites,
59
- phys_dim=2,
60
- site_tag_id="I{}",
61
- site_ind_id="k{}",
62
- layer_ind_id="l{}",
63
- ):
64
- new = object.__new__(cls)
65
- new.phys_dim = phys_dim
66
- new._sites = tuple(sites)
67
- new._site_tag_id = site_tag_id
68
- new._site_ind_id = site_ind_id
69
- new._layer_ind_id = layer_ind_id
70
-
71
- new._open_upper_sites = oset(new._sites)
72
- new._open_lower_sites = oset(new._sites)
73
-
74
- super().__init__(new, ())
75
- return new
76
-
77
- @property
78
- def layer_ind_id(self):
79
- return self._layer_ind_id
80
-
81
- def layer_ind(self, site):
82
- return self._layer_ind_id.format(site)
83
-
84
- def layer_gate_raw(
85
- self,
86
- G,
87
- where,
88
- iso=True,
89
- new_sites=None,
90
- tags=None,
91
- all_site_tags=None,
92
- ):
93
- """Build out this MERA by placing either a new unitary, isometry or
94
- tree tensor, given by ``G``, at the sites given by ``where``. This
95
- handles propagating the lightcone of tags and marking the correct
96
- indices of the ``IsoTensor`` as ``left_inds``.
97
-
98
- Parameters
99
- ----------
100
- G : array_like
101
- The raw array to place at the sites. Its shape determines whether
102
- it is a unitary or isometry/tree. It should have ``k + len(where)``
103
- dimensions. For a unitary ``k == len(where)``. If it is an
104
- isometry/tree, ``k`` will generally be ``1``, or ``0`` to 'cap' the
105
- MERA. The rightmost indices are those attached to the current open
106
- layer indices.
107
- where : sequence of hashable
108
- The sites to layer the tensor above.
109
- iso : bool, optional
110
- Whether to declare the tensor as a unitary/isometry by marking
111
- the left indices. If ``iso = False`` (a 'tree' tensor) then one
112
- should have ``k <= 1``. Once you have such a 'tree' tensor you
113
- cannot place isometries or unitaries above it. It will also have
114
- the lightcone tags of every site. Technically one could place
115
- a 'PEPS' style tensor with ``iso = False`` and ``k > 1`` but some
116
- methods might break.
117
- new_sites : sequence of hashable, optional
118
- Which sites to make new open sites. If not given, defaults to the
119
- first ``k`` sites in ``where``.
120
- tags : sequence of str, optional
121
- Custom tags to add to the new tensor, in addition to the
122
- automatically generated site tags.
123
- all_site_tags : sequence of str, optional
124
- For performance, supply all site tags to avoid recomputing them.
125
- """
126
- if all_site_tags is None:
127
- all_site_tags = oset(map(self.site_tag, self.gen_site_coos()))
128
-
129
- # work out 'lower' tensor indices
130
- nbelow = len(where)
131
- below_ix = []
132
- reindex_map = {}
133
- tags = tags_to_oset(tags)
134
- for site in where:
135
- if site in self._open_lower_sites:
136
- # this is the first tensor placed above site
137
- below_ix.append(self.site_ind(site))
138
- self._open_lower_sites.remove(site)
139
- tags.add(self.site_tag(site))
140
- else:
141
- # tensor is being placed above existing tensor
142
- current_lower_ix = self.layer_ind(site)
143
- new_lower_ix = rand_uuid()
144
- reindex_map[current_lower_ix] = new_lower_ix
145
- below_ix.append(new_lower_ix)
146
-
147
- # work out 'upper' tensor indices
148
- nabove = len(G.shape) - nbelow
149
- if new_sites is None:
150
- new_sites = where[:nabove]
151
- above_ix = [self.layer_ind(site) for site in new_sites]
152
-
153
- for site in where[nabove:]:
154
- # if tensor is not a unitary then some upper sites are removed
155
- self._open_upper_sites.remove(site)
156
-
157
- # want to propagate just site tags from tensors below
158
- old_tags = oset_union(t.tags for t in self._inds_get(*reindex_map))
159
-
160
- if iso and "TREE" in old_tags:
161
- raise ValueError(
162
- "You can't place isometric tensors above tree tensors."
163
- )
164
-
165
- if not iso:
166
- # tensor is in lightcone of all sites
167
- tags |= all_site_tags
168
- tags.add("TREE")
169
- left_inds = None
170
- if nabove > 1:
171
- import warnings
172
-
173
- warnings.warn(
174
- "You are placing a tensor which is neither "
175
- "isometric/unitary or a tree. Some methods might break."
176
- )
177
- else:
178
- # just want site tags present on tensors below
179
- tags |= old_tags & all_site_tags
180
- if nbelow == nabove:
181
- tags.add("UNI")
182
- else:
183
- tags.add("ISO")
184
- if nabove == 0:
185
- tags.add("CAP")
186
- left_inds = below_ix
187
-
188
- # rewire and add tensor
189
- self.reindex_(reindex_map)
190
- self |= IsoTensor(
191
- data=G,
192
- inds=below_ix + above_ix,
193
- left_inds=left_inds,
194
- tags=tags,
195
- )
196
-
197
- def layer_gate_fill_fn(
198
- self,
199
- fill_fn,
200
- operation,
201
- where,
202
- max_bond,
203
- new_sites=None,
204
- tags=None,
205
- all_site_tags=None,
206
- ):
207
- """Build out this MERA by placing either a new unitary, isometry or
208
- tree tensor at sites ``where``, generating the data array using
209
- ``fill_fn`` and maximum bond dimension ``max_bond``.
210
-
211
- Parameters
212
- ----------
213
- fill_fn : callable
214
- A function with signature ``fill_fn(shape) -> array_like``.
215
- operation : {"iso", "uni", "cap", "tree", "treecap"}
216
- The type of tensor to place.
217
- where : sequence of hashable
218
- The sites to layer the tensor above.
219
- max_bond : int
220
- The maximum bond dimension of the tensor. This only applies for
221
- isometries and trees and when the product of the lower dimensions
222
- is greater than ``max_bond``.
223
- new_sites : sequence of hashable, optional
224
- Which sites to make new open sites. If not given, defaults to the
225
- first ``k`` sites in ``where``.
226
- tags : sequence of str, optional
227
- Custom tags to add to the new tensor, in addition to the
228
- automatically generated site tags.
229
- all_site_tags : sequence of str, optional
230
- For performance, supply all site tags to avoid recomputing them.
231
-
232
- See Also
233
- --------
234
- layer_gate_raw
235
- """
236
- shape = []
237
- for site in where:
238
- if site in self._open_lower_sites:
239
- shape.append(self.phys_dim)
240
- else:
241
- shape.append(self.ind_size(self.layer_ind(site)))
242
-
243
- if operation == "uni":
244
- # unitary, map is shape preserving
245
- shape = (*shape, *shape)
246
- elif operation in ("iso", "tree"):
247
- current_size = prod(shape)
248
- new_size = min(current_size, max_bond)
249
- shape = (*shape, new_size)
250
- elif operation in ("cap", "treecap"):
251
- # no new sites
252
- shape = tuple(shape)
253
- else:
254
- raise ValueError(
255
- f"Unknown operation: '{operation}'. Should be one of: "
256
- "'uni', 'iso', 'cap', 'tree', or 'treecap'."
257
- )
258
-
259
- G = fill_fn(shape)
260
- self.layer_gate_raw(
261
- G,
262
- where,
263
- new_sites=new_sites,
264
- tags=tags,
265
- all_site_tags=all_site_tags,
266
- iso="tree" not in operation,
267
- )
268
-
269
- def partial_trace(
270
- self,
271
- keep,
272
- optimize="auto-hq",
273
- rehearse=False,
274
- preserve_tensor=False,
275
- **contract_opts,
276
- ):
277
- """Partial trace out all sites except those in ``keep``, making use of
278
- the lightcone structure of the MERA.
279
-
280
- Parameters
281
- ----------
282
- keep : sequence of hashable
283
- The sites to keep.
284
- optimize : str or PathOptimizer, optional
285
- The contraction ordering strategy to use.
286
- rehearse : {False, "tn", "tree"}, optional
287
- Whether to rehearse the contraction rather than actually performing
288
- it. If:
289
-
290
- - ``False``: perform the contraction and return the reduced density
291
- matrix,
292
- - "tn": just the lightcone tensor network is returned,
293
- - "tree": just the contraction tree that will be used is returned.
294
-
295
- contract_opts
296
- Additional options to pass to
297
- :func:`~quimb.tensor.tensor_core.tensor_contract`.
298
-
299
- Returns
300
- -------
301
- array_like
302
- The reduced density matrix on sites ``keep``.
303
- """
304
- tags = tuple(map(self.site_tag, keep))
305
- k = self.select_any(tags, virtual=False)
306
-
307
- kix = tuple(map(self.site_ind, keep))
308
- bix = tuple(f"b{site}" for site in keep)
309
- b = k.reindex(dict(zip(kix, bix))).conj_()
310
- tn = b | k
311
-
312
- if rehearse == "tn":
313
- return tn
314
-
315
- contract_opts["optimize"] = optimize
316
-
317
- if rehearse == "tree":
318
- return tn.contraction_tree(output_inds=bix + kix, **contract_opts)
319
-
320
- t = tn.contract(output_inds=bix + kix, **contract_opts)
321
- if preserve_tensor:
322
- return t
323
-
324
- return t.to_dense(bix, kix)
325
-
326
- def local_expectation(
327
- self,
328
- G,
329
- where,
330
- optimize="auto-hq",
331
- rehearse=False,
332
- **contract_opts,
333
- ):
334
- """Compute the expectation value of a local operator ``G`` at sites
335
- ``where``. This is done by contracting the lightcone tensor network
336
- to form the reduced density matrix, before taking the trace with
337
- ``G``.
338
-
339
- Parameters
340
- ----------
341
- G : array_like
342
- The local operator to compute the expectation value of.
343
- where : sequence of hashable
344
- The sites to compute the expectation value at.
345
- optimize : str or PathOptimizer, optional
346
- The contraction ordering strategy to use.
347
- rehearse : {False, "tn", "tree"}, optional
348
- Whether to rehearse the contraction rather than actually performing
349
- it. See :meth:`~quimb.tensor.mera.MERA.partial_trace` for details.
350
- contract_opts
351
- Additional options to pass to
352
- :func:`~quimb.tensor.tensor_core.tensor_contract`.
353
-
354
- Returns
355
- -------
356
- float
357
- The expectation value of ``G`` at sites ``where``.
358
-
359
- See Also
360
- --------
361
- partial_trace
362
- """
363
- t_rho = self.partial_trace(
364
- keep=where,
365
- optimize=optimize,
366
- rehearse=rehearse,
367
- preserve_tensor=True,
368
- **contract_opts,
369
- )
370
-
371
- if rehearse:
372
- # returned t_rho is the tree or whole TN etc.
373
- return t_rho
374
-
375
- # make sure G is compatible shape (could be supplied in 'matrix' form)
376
- if G.shape != t_rho.shape:
377
- # n.b. both are hermitian so no 'transpose' needed here
378
- G = G.reshape(t_rho.shape)
379
-
380
- # make gate tensor
381
- nphys = t_rho.ndim // 2
382
- bix, kix = t_rho.inds[:nphys], t_rho.inds[nphys:]
383
- t_G = Tensor(G, inds=kix + bix)
384
-
385
- return t_rho @ t_G
386
-
387
- def compute_local_expectation(
388
- self,
389
- terms,
390
- optimize="auto-hq",
391
- return_all=False,
392
- rehearse=False,
393
- executor=None,
394
- progbar=False,
395
- **contract_opts,
396
- ):
397
- """Compute the expectation value of a collection of local operators
398
- ``terms``, keyed by the sites they act on. This is done by contracting the lightcone
399
- tensor network to form the reduced density matrices, before taking the
400
- trace with each ``G`` in ``terms``.
401
-
402
- Parameters
403
- ----------
404
- terms : dict[tuple[hashable], array_like]
405
- The local operators to compute the expectation value of, keyed by
406
- the sites they act on.
407
- optimize : str or PathOptimizer, optional
408
- The contraction ordering strategy to use.
409
- return_all : bool, optional
410
- Whether to return all the expectation values, or just the sum.
411
- rehearse : {False, "tn", "tree"}, optional
412
- Whether to rehearse the contraction rather than actually performing
413
- it. See :meth:`~quimb.tensor.mera.MERA.partial_trace` for details.
414
- executor : Executor, optional
415
- The executor to use for parallelism.
416
- progbar : bool, optional
417
- Whether to show a progress bar.
418
- contract_opts
419
- Additional options to pass to
420
- :func:`~quimb.tensor.tensor_core.tensor_contract`.
421
- """
422
- return _compute_expecs_maybe_in_parallel(
423
- fn=_tn_local_expectation,
424
- tn=self,
425
- terms=terms,
426
- return_all=return_all,
427
- executor=executor,
428
- progbar=progbar,
429
- optimize=optimize,
430
- rehearse=rehearse,
431
- **contract_opts,
432
- )
433
-
434
- def expand_bond_dimension(
435
- self,
436
- new_bond_dim,
437
- rand_strength=0.0,
438
- inds_to_expand=None,
439
- inplace=False,
440
- ):
441
- """Expand the maxmimum bond dimension of this isometric tensor network
442
- to ``new_bond_dim``. Unlike
443
- :meth:`~quimb.tensor.tensor_core.TensorNetwork.expand_bond_dimension`
444
- this proceeds from the physical indices upwards, and only increases a
445
- bond's size if ``new_bond_dim`` is larger than the product of the lower
446
- indices dimensions.
447
-
448
- Parameters
449
- ----------
450
- new_bond_dim : int
451
- The new maximum bond dimension to expand to.
452
- rand_strength : float, optional
453
- The strength of random noise to add to the new array entries,
454
- if any.
455
- inds_to_expand : sequence of str, optional
456
- The indices to expand, if not all.
457
- inplace : bool, optional
458
- Whether to expand this tensor network in place, or return a new
459
- one.
460
-
461
- Returns
462
- -------
463
- TensorNetworkGenIso
464
- """
465
- if inds_to_expand is not None:
466
- return super().expand_bond_dimension(
467
- new_bond_dim=new_bond_dim,
468
- rand_strength=rand_strength,
469
- inds_to_expand=inds_to_expand,
470
- inplace=inplace,
471
- )
472
-
473
- tn = self if inplace else self.copy()
474
-
475
- tids_done = oset()
476
- inds_done = oset(tn.site_inds)
477
- tids_todo = tn._get_tids_from_inds(inds_done, "any")
478
-
479
- # XXX: switch this logic to get_tree_span('CAP')? to
480
- # ensure topologically sorted order?
481
- while tids_todo:
482
- tid = tids_todo.popleft()
483
- t = tn.tensor_map[tid]
484
-
485
- if t.left_inds is not None:
486
- below_inds = oset(t.left_inds)
487
- above_inds = oset(t.inds) - below_inds
488
- else:
489
- below_inds, above_inds = oset(), oset()
490
- for ix in t.inds:
491
- (below_inds if ix in inds_done else above_inds).add(ix)
492
-
493
- if len(above_inds) == 0:
494
- # top piece
495
- continue
496
-
497
- elif len(above_inds) == 1:
498
- # isometry, bond can expand
499
- (ix,) = above_inds
500
- cur_sz = t.ind_size(ix)
501
- rem_inds_sz = t.size // cur_sz
502
-
503
- # don't expand beyond product of lower index sizes
504
- new_sz = min(rem_inds_sz, new_bond_dim)
505
- if new_sz > cur_sz:
506
- tn.expand_bond_dimension_(
507
- new_bond_dim=new_sz,
508
- rand_strength=rand_strength,
509
- inds_to_expand=ix,
510
- )
511
-
512
- elif len(above_inds) == len(below_inds):
513
- # unitary gate, maintain bond sizes
514
- for bix, aix in zip(below_inds, above_inds):
515
- tn.expand_bond_dimension_(
516
- new_bond_dim=t.ind_size(bix),
517
- rand_strength=rand_strength,
518
- inds_to_expand=aix,
519
- )
520
-
521
- else:
522
- raise NotImplementedError
523
-
524
- tids_done.add(tid)
525
- inds_done.update(above_inds)
526
- for tid_above in tn._get_tids_from_inds(above_inds, "any"):
527
- if tid_above not in tids_done:
528
- tids_todo.add(tid_above)
529
-
530
- return tn
531
-
532
- expand_bond_dimension_ = functools.partialmethod(
533
- expand_bond_dimension, inplace=True
534
- )
535
-
536
-
537
- def calc_1d_unis_isos(sites, block_size, cyclic, group_from_right):
538
- """Given ``sites``, assumed to be in a 1D order, though not neccessarily
539
- contiguous, calculate unitary and isometry groupings::
540
-
541
- │ │ <- new grouped site
542
- ┐ ┌─────┐ ┌─────┐ ┌
543
- │ │ ISO │ │ ISO │ │
544
- ┘ └─────┘ └─────┘ └
545
- │ │..│..│ │..│..│ │
546
- ┌───┐ │ ┌───┐ │ ┌───┐
547
- │UNI│ │ │UNI│ │ │UNI│
548
- └───┘ │ └───┘ │ └───┘
549
- │ │ ... │ │ ... │ │
550
- ^^^^^^^ <- isometry groupings of size block_size
551
- ^^^^^ ^^^^^ <- unitary groupings of size 2
552
-
553
- Parameters
554
- ----------
555
- sites : sequence of hashable
556
- The sites to apply a layer to.
557
- block_size : int
558
- How many sites to group together per isometry block. Note that
559
- currently the unitaries will only ever act on blocks of size 2 across
560
- isometry block boundaries.
561
- cyclic : bool
562
- Whether to apply disentangler / unitaries across the boundary. The
563
- isometries will never be applied across the boundary, but since they
564
- always form a tree such a bipartition is natural.
565
- group_from_right : bool
566
- Whether to group the sites starting from the left or right. This only
567
- matters if ``block_size`` does not divide the number of sites.
568
- Alternating between left and right more evenly tiles the unitaries and
569
- isometries, especially at lower layers.
570
-
571
- Returns
572
- -------
573
- unis : list[tuple]
574
- The unitary groupings.
575
- isos : list[tuple]
576
- The isometry groupings.
577
- """
578
- sites = tuple(sites)
579
- nsites = len(sites)
580
-
581
- # track this so we know neighboring sites
582
- ranks = {s: i for i, s in enumerate(sites)}
583
-
584
- # first we linearly partition the sites to form the isometry groups
585
- size = block_size * (nsites // block_size)
586
- if group_from_right:
587
- grouped = sites[-size:]
588
- else:
589
- grouped = sites[:size]
590
- isos = list(partition(block_size, grouped))
591
-
592
- # then we disentangle at the edges of the
593
- # isometries to form the unitaries
594
- unis = set()
595
- for iso in isos:
596
- # n.b. only when the groups are not adjacent (e.g. because the number
597
- # of sites doesn't divide) will there be a left (right) disentangler
598
- # which is not also a right (left) disentangler. In that case a site
599
- # can see 2 unitaries rather than the usual 1 unitary and 1 isometry:
600
- # │
601
- # ┐ ┌─────┐ ┌
602
- # │ │ ISO │ │
603
- # ┘ └─────┘ └
604
- # │ │..│..│ │
605
- # ┌───┐ │ ┌───┐
606
- # │UNI│ │ │UNI│
607
- # └───┘ │ └───┘
608
- # │ │ │ │
609
- # sl si sf sr
610
-
611
- # attempt left disentangle
612
- si = iso[0]
613
- ri = ranks[si]
614
- if cyclic or ri > 0:
615
- sl = sites[ri - 1]
616
- unis.add((sl, si))
617
-
618
- # attempt right disentangle
619
- sf = iso[-1]
620
- rf = ranks[sf]
621
- if cyclic or rf < nsites - 1:
622
- sr = sites[(rf + 1) % nsites]
623
- unis.add((sf, sr))
624
-
625
- return sorted(unis), isos
626
-
627
-
628
- class MERA(TensorNetwork1DVector, TensorNetworkGenIso):
629
- """Replacement class for ``MERA`` which uses the new infrastructure and
630
- thus has methods like ``compute_local_expectation``.
631
- """
632
-
633
- _EXTRA_PROPS = tuple(
634
- sorted(
635
- set(TensorNetwork1DVector._EXTRA_PROPS)
636
- | set(TensorNetworkGenIso._EXTRA_PROPS)
637
- )
638
- )
639
- _CONTRACT_STRUCTURED = False
640
-
641
- def __init__(self, *args, **kwargs):
642
- self._num_layers = None
643
- super().__init__(*args, **kwargs)
644
-
645
- @classmethod
646
- def from_fill_fn(
647
- cls,
648
- fill_fn,
649
- L,
650
- D,
651
- phys_dim=2,
652
- block_size=2,
653
- cyclic=True,
654
- uni_fill_fn=None,
655
- iso_fill_fn=None,
656
- cap_fill_fn=None,
657
- **kwargs,
658
- ):
659
- """Create a 1D MERA using ``fill_fn(shape) -> array_like`` to fill the
660
- tensors.
661
-
662
- Parameters
663
- ----------
664
- fill_fn : callable
665
- A function which takes a shape and returns an array_like of that
666
- shape. You can override this specifically for the unitaries,
667
- isometries and cap tensors using the kwargs ``uni_fill_fn``,
668
- ``iso_fill_fn`` and ``cap_fill_fn``.
669
- L : int
670
- The number of sites.
671
- D : int
672
- The maximum bond dimension.
673
- phys_dim : int, optional
674
- The dimension of the physical indices.
675
- block_size : int, optional
676
- The size of the isometry blocks. Binary MERA is the default,
677
- ternary MERA is ``block_size=3``.
678
- cyclic : bool, optional
679
- Whether to apply disentangler / unitaries across the boundary. The
680
- isometries will never be applied across the boundary, but since
681
- they always form a tree such a bipartition is natural.
682
- uni_fill_fn : callable, optional
683
- A function which takes a shape and returns an array_like of that
684
- shape. This is used to fill the unitary tensors. If ``None`` then
685
- ``fill_fn`` is used.
686
- iso_fill_fn : callable, optional
687
- A function which takes a shape and returns an array_like of that
688
- shape. This is used to fill the isometry tensors. If ``None`` then
689
- ``fill_fn`` is used.
690
- cap_fill_fn : callable, optional
691
- A function which takes a shape and returns an array_like of that
692
- shape. This is used to fill the cap tensors. If ``None`` then
693
- ``fill_fn`` is used.
694
- kwargs
695
- Supplied to ``TensorNetworkGenIso.__init__``.
696
- """
697
- mera = cls.empty(sites=range(L), phys_dim=phys_dim, **kwargs)
698
- mera._L = L
699
-
700
- if uni_fill_fn is None:
701
- uni_fill_fn = fill_fn
702
- if iso_fill_fn is None:
703
- iso_fill_fn = fill_fn
704
- if cap_fill_fn is None:
705
- cap_fill_fn = iso_fill_fn
706
-
707
- for lyr in itertools.count():
708
- remaining_sites = sorted(mera._open_upper_sites)
709
-
710
- if len(remaining_sites) <= block_size + 1:
711
- # can terminate with a 'cap'
712
- mera.layer_gate_fill_fn(
713
- cap_fill_fn,
714
- "cap",
715
- remaining_sites,
716
- D,
717
- tags=f"LAYER{lyr}",
718
- )
719
- break
720
-
721
- # else add a disentangling and grouping layer
722
- uni_groups, iso_groups = calc_1d_unis_isos(
723
- remaining_sites,
724
- block_size,
725
- cyclic,
726
- group_from_right=lyr % 2,
727
- )
728
- for uni_sites in uni_groups:
729
- mera.layer_gate_fill_fn(
730
- uni_fill_fn,
731
- "uni",
732
- uni_sites,
733
- D,
734
- tags=f"LAYER{lyr}",
735
- )
736
- for iso_sites in iso_groups:
737
- mera.layer_gate_fill_fn(
738
- iso_fill_fn,
739
- "iso",
740
- iso_sites,
741
- D,
742
- tags=f"LAYER{lyr}",
743
- )
744
-
745
- mera._num_layers = lyr + 1
746
-
747
- return mera
748
-
749
- @classmethod
750
- def rand(
751
- cls,
752
- L,
753
- D,
754
- seed=None,
755
- block_size=2,
756
- phys_dim=2,
757
- cyclic=True,
758
- isometrize_method="svd",
759
- **kwargs,
760
- ):
761
- """Return a random (optionally isometrized) MERA.
762
-
763
- Parameters
764
- ----------
765
- L : int
766
- The number of sites.
767
- D : int
768
- The maximum bond dimension.
769
- seed : int, optional
770
- A random seed.
771
- block_size : int, optional
772
- The size of the isometry blocks. Binary MERA is the default,
773
- ternary MERA is ``block_size=3``.
774
- phys_dim : int, optional
775
- The dimension of the physical indices.
776
- cyclic : bool, optional
777
- Whether to apply disentangler / unitaries across the boundary. The
778
- isometries will never be applied across the boundary, but since
779
- they always form a tree such a bipartition is natural.
780
- isometrize_method : str or None, optional
781
- If given, the method to use to isometrize the MERA. If ``None``
782
- then the MERA is not isometrized.
783
- """
784
- import numpy as np
785
-
786
- rng = np.random.default_rng(seed)
787
- mera = cls.from_fill_fn(
788
- lambda shape: rng.normal(size=shape),
789
- L,
790
- D,
791
- block_size=block_size,
792
- phys_dim=phys_dim,
793
- cyclic=cyclic,
794
- **kwargs,
795
- )
796
- if isometrize_method is not None:
797
- mera.isometrize_(isometrize_method)
798
- return mera
799
-
800
- @property
801
- def num_layers(self):
802
- return self._num_layers
803
-
804
-
805
- def TTN_randtree_rand(
806
- sites,
807
- D,
808
- phys_dim=2,
809
- group_size=2,
810
- iso=False,
811
- seed=None,
812
- **kwargs,
813
- ):
814
- """Return a randomly constructed tree tensor network.
815
-
816
- Parameters
817
- ----------
818
- sites : list of hashable
819
- The sites of the tensor network.
820
- D : int
821
- The maximum bond dimension.
822
- phys_dim : int, optional
823
- The dimension of the physical indices.
824
- group_size : int, optional
825
- How many sites to group together in each tensor.
826
- iso : bool, optional
827
- Whether to build the tree with an isometric flow towards the top.
828
- seed : int, optional
829
- A random seed.
830
- kwargs
831
- Supplied to ``TensorNetworkGenIso.empty``.
832
-
833
- Returns
834
- -------
835
- ttn : TensorNetworkGenIso
836
- The tree tensor network.
837
- """
838
- import numpy as np
839
-
840
- sites = list(sites)
841
-
842
- rng = np.random.default_rng(seed)
843
- tn = TensorNetworkGenIso.empty(sites, phys_dim=phys_dim, **kwargs)
844
-
845
- while len(sites) > group_size + 1:
846
- # randomly pick `group_size` sites to merge
847
- merge = sorted(
848
- sites.pop(rng.integers(len(sites))) for _ in range(group_size)
849
- )
850
- tn.layer_gate_fill_fn(
851
- lambda shape: rng.normal(size=shape),
852
- "iso" if iso else "tree",
853
- merge,
854
- max_bond=D,
855
- )
856
- sites.append(merge[0])
857
-
858
- tn.layer_gate_fill_fn(
859
- lambda shape: rng.normal(size=shape),
860
- "cap" if iso else "treecap",
861
- sites,
862
- max_bond=D,
863
- )
864
-
865
- return tn
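
For context, the hunk above removes the vendored copy of quimb's experimental MERA builder from the wheel (file 31 in the list, trajectree/quimb/quimb/experimental/merabuilder.py). The sketch below is illustrative only and is not part of either package version: it exercises the removed module's public API exactly as declared in the signatures shown in the hunk, and assumes an environment where the upstream quimb package, which ships this same module as quimb.experimental.merabuilder, is installed.

# Illustrative only -- not shipped in either wheel. Assumes upstream quimb
# is installed and still provides quimb.experimental.merabuilder.
import quimb as qu
from quimb.experimental.merabuilder import (
    MERA,
    TTN_randtree_rand,
    calc_1d_unis_isos,
)

L, D = 8, 4

# random isometrized binary MERA on L sites with maximum bond dimension D
mera = MERA.rand(L, D, seed=42)

# nearest-neighbour Heisenberg terms, keyed by the sites they act on
H2 = qu.ham_heis(2)
terms = {(i, (i + 1) % L): H2 for i in range(L)}

# lightcone-based evaluation of the total energy
energy = mera.compute_local_expectation(terms)

# the 1D layer groupings used internally: unitary pairs and isometry blocks
unis, isos = calc_1d_unis_isos(
    range(L), block_size=2, cyclic=True, group_from_right=False
)

# a random (non-isometric) tree tensor network over the same sites
ttn = TTN_randtree_rand(range(L), D=D, seed=42)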