Trajectree 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +0 -3
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +9 -9
- trajectree/fock_optics/outputs.py +10 -6
- trajectree/fock_optics/utils.py +9 -6
- trajectree/sequence/swap.py +5 -4
- trajectree/trajectory.py +5 -4
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/METADATA +2 -3
- trajectree-0.0.3.dist-info/RECORD +16 -0
- trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
- trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
- trajectree/quimb/docs/conf.py +0 -158
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
- trajectree/quimb/quimb/__init__.py +0 -507
- trajectree/quimb/quimb/calc.py +0 -1491
- trajectree/quimb/quimb/core.py +0 -2279
- trajectree/quimb/quimb/evo.py +0 -712
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +0 -129
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
- trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
- trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
- trajectree/quimb/quimb/experimental/schematic.py +0 -7
- trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
- trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
- trajectree/quimb/quimb/gates.py +0 -36
- trajectree/quimb/quimb/gen/__init__.py +0 -2
- trajectree/quimb/quimb/gen/operators.py +0 -1167
- trajectree/quimb/quimb/gen/rand.py +0 -713
- trajectree/quimb/quimb/gen/states.py +0 -479
- trajectree/quimb/quimb/linalg/__init__.py +0 -6
- trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
- trajectree/quimb/quimb/linalg/autoblock.py +0 -258
- trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
- trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
- trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
- trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
- trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
- trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
- trajectree/quimb/quimb/schematic.py +0 -1518
- trajectree/quimb/quimb/tensor/__init__.py +0 -401
- trajectree/quimb/quimb/tensor/array_ops.py +0 -610
- trajectree/quimb/quimb/tensor/circuit.py +0 -4824
- trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
- trajectree/quimb/quimb/tensor/contraction.py +0 -336
- trajectree/quimb/quimb/tensor/decomp.py +0 -1255
- trajectree/quimb/quimb/tensor/drawing.py +0 -1646
- trajectree/quimb/quimb/tensor/fitting.py +0 -385
- trajectree/quimb/quimb/tensor/geometry.py +0 -583
- trajectree/quimb/quimb/tensor/interface.py +0 -114
- trajectree/quimb/quimb/tensor/networking.py +0 -1058
- trajectree/quimb/quimb/tensor/optimize.py +0 -1818
- trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
- trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
- trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
- trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
- trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
- trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
- trajectree/quimb/quimb/utils.py +0 -892
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +0 -501
- trajectree/quimb/tests/test_calc.py +0 -788
- trajectree/quimb/tests/test_core.py +0 -847
- trajectree/quimb/tests/test_evo.py +0 -565
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +0 -361
- trajectree/quimb/tests/test_gen/test_rand.py +0 -296
- trajectree/quimb/tests/test_gen/test_states.py +0 -261
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
- trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
- trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
- trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
- trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
- trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
- trajectree/quimb/tests/test_utils.py +0 -85
- trajectree-0.0.1.dist-info/RECORD +0 -126
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/WHEEL +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/top_level.txt +0 -0
|
@@ -1,1058 +0,0 @@
|
|
|
1
|
-
"""Functionality for analyzing the structure of tensor networks, including
|
|
2
|
-
finding paths, loops, connected components, hierarchical groupings and more.
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
import collections
|
|
6
|
-
import functools
|
|
7
|
-
import itertools
|
|
8
|
-
import math
|
|
9
|
-
|
|
10
|
-
from ..utils import oset
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
class NetworkPath:
|
|
14
|
-
"""A simple class to represent a path through a tensor network, storing
|
|
15
|
-
both the tensor identifies (`tids`) and indices (`inds`) it passes through.
|
|
16
|
-
"""
|
|
17
|
-
|
|
18
|
-
__slots__ = ("_tids", "_inds", "_key")
|
|
19
|
-
|
|
20
|
-
def __init__(self, tids, inds=()):
|
|
21
|
-
self._tids = tuple(tids)
|
|
22
|
-
self._inds = tuple(inds)
|
|
23
|
-
if len(self._tids) != len(self._inds) + 1:
|
|
24
|
-
raise ValueError("tids should be one longer than inds")
|
|
25
|
-
self._key = None
|
|
26
|
-
|
|
27
|
-
@classmethod
|
|
28
|
-
def from_sequence(self, it):
|
|
29
|
-
tids = []
|
|
30
|
-
inds = []
|
|
31
|
-
for x in it:
|
|
32
|
-
(tids if isinstance(x, int) else inds).append(x)
|
|
33
|
-
return NetworkPath(tids, inds)
|
|
34
|
-
|
|
35
|
-
@property
|
|
36
|
-
def tids(self):
|
|
37
|
-
return self._tids
|
|
38
|
-
|
|
39
|
-
@property
|
|
40
|
-
def inds(self):
|
|
41
|
-
return self._inds
|
|
42
|
-
|
|
43
|
-
@property
|
|
44
|
-
def key(self):
|
|
45
|
-
# build lazily as don't always need
|
|
46
|
-
if self._key is None:
|
|
47
|
-
self._key = frozenset(self._tids + self._inds)
|
|
48
|
-
return self._key
|
|
49
|
-
|
|
50
|
-
def __len__(self):
|
|
51
|
-
return len(self._inds)
|
|
52
|
-
|
|
53
|
-
def __iter__(self):
|
|
54
|
-
# interleave tids and inds
|
|
55
|
-
for tid, ind in zip(self._tids, self._inds):
|
|
56
|
-
yield tid
|
|
57
|
-
yield ind
|
|
58
|
-
# always one more tid
|
|
59
|
-
yield self._tids[-1]
|
|
60
|
-
|
|
61
|
-
def __contains__(self, x):
|
|
62
|
-
return x in self.key
|
|
63
|
-
|
|
64
|
-
def __hash__(self):
|
|
65
|
-
return hash(self.key)
|
|
66
|
-
|
|
67
|
-
def __repr__(self):
|
|
68
|
-
return f"NetworkPath({self._tids}, {self._inds})"
|
|
69
|
-
|
|
70
|
-
def extend(self, ind, tid):
|
|
71
|
-
"""Get a new path by extending this one with a new index and tensor id."""
|
|
72
|
-
new = NetworkPath.__new__(NetworkPath)
|
|
73
|
-
new._tids = self._tids + (tid,)
|
|
74
|
-
new._inds = self._inds + (ind,)
|
|
75
|
-
if self._key is not None:
|
|
76
|
-
new._key = self._key | {tid, ind}
|
|
77
|
-
return new
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
def istree(tn):
|
|
81
|
-
"""Check if this tensor network has a tree structure, (treating
|
|
82
|
-
multibonds as a single edge).
|
|
83
|
-
|
|
84
|
-
Examples
|
|
85
|
-
--------
|
|
86
|
-
|
|
87
|
-
>>> MPS_rand_state(10, 7).istree()
|
|
88
|
-
True
|
|
89
|
-
|
|
90
|
-
>>> MPS_rand_state(10, 7, cyclic=True).istree()
|
|
91
|
-
False
|
|
92
|
-
|
|
93
|
-
"""
|
|
94
|
-
tid0 = next(iter(tn.tensor_map))
|
|
95
|
-
region = [(tid0, None)]
|
|
96
|
-
seen = {tid0}
|
|
97
|
-
while region:
|
|
98
|
-
tid, ptid = region.pop()
|
|
99
|
-
for ntid in tn._get_neighbor_tids(tid):
|
|
100
|
-
if ntid == ptid:
|
|
101
|
-
# ignore the previous tid we just came from
|
|
102
|
-
continue
|
|
103
|
-
if ntid in seen:
|
|
104
|
-
# found a loop
|
|
105
|
-
return False
|
|
106
|
-
# expand the queue
|
|
107
|
-
region.append((ntid, tid))
|
|
108
|
-
seen.add(ntid)
|
|
109
|
-
return True
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
def isconnected(tn):
|
|
113
|
-
"""Check whether this tensor network is connected, i.e. whether
|
|
114
|
-
there is a path between any two tensors, (including size 1 indices).
|
|
115
|
-
"""
|
|
116
|
-
tid0 = next(iter(tn.tensor_map))
|
|
117
|
-
region = tn._get_subgraph_tids([tid0])
|
|
118
|
-
return len(region) == len(tn.tensor_map)
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
def subgraphs(tn, virtual=False):
|
|
122
|
-
"""Split this tensor network into disconneceted subgraphs.
|
|
123
|
-
|
|
124
|
-
Parameters
|
|
125
|
-
----------
|
|
126
|
-
virtual : bool, optional
|
|
127
|
-
Whether the tensor networks should view the original tensors or
|
|
128
|
-
not - by default take copies.
|
|
129
|
-
|
|
130
|
-
Returns
|
|
131
|
-
-------
|
|
132
|
-
list[TensorNetwork]
|
|
133
|
-
"""
|
|
134
|
-
groups = []
|
|
135
|
-
tids = oset(tn.tensor_map)
|
|
136
|
-
|
|
137
|
-
# check all nodes
|
|
138
|
-
while tids:
|
|
139
|
-
# get a remaining node
|
|
140
|
-
tid0 = tids.popright()
|
|
141
|
-
queue = [tid0]
|
|
142
|
-
group = oset(queue)
|
|
143
|
-
|
|
144
|
-
while queue:
|
|
145
|
-
# expand it until no neighbors
|
|
146
|
-
tid = queue.pop()
|
|
147
|
-
for tid_n in tn._get_neighbor_tids(tid):
|
|
148
|
-
if tid_n in group:
|
|
149
|
-
continue
|
|
150
|
-
else:
|
|
151
|
-
group.add(tid_n)
|
|
152
|
-
queue.append(tid_n)
|
|
153
|
-
|
|
154
|
-
# remove current subgraph and continue
|
|
155
|
-
tids -= group
|
|
156
|
-
groups.append(group)
|
|
157
|
-
|
|
158
|
-
return [tn._select_tids(group, virtual=virtual) for group in groups]
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
def get_tree_span(
|
|
162
|
-
tn,
|
|
163
|
-
tids,
|
|
164
|
-
min_distance=0,
|
|
165
|
-
max_distance=None,
|
|
166
|
-
include=None,
|
|
167
|
-
exclude=None,
|
|
168
|
-
ndim_sort="max",
|
|
169
|
-
distance_sort="min",
|
|
170
|
-
sorter=None,
|
|
171
|
-
weight_bonds=True,
|
|
172
|
-
inwards=True,
|
|
173
|
-
):
|
|
174
|
-
"""Generate a tree on the tensor network graph, fanning out from the
|
|
175
|
-
tensors identified by ``tids``, up to a maximum of ``max_distance``
|
|
176
|
-
away. The tree can be visualized with
|
|
177
|
-
:meth:`~quimb.tensor.tensor_core.TensorNetwork.draw_tree_span`.
|
|
178
|
-
|
|
179
|
-
Parameters
|
|
180
|
-
----------
|
|
181
|
-
tids : sequence of str
|
|
182
|
-
The nodes that define the region to span out of.
|
|
183
|
-
min_distance : int, optional
|
|
184
|
-
Don't add edges to the tree until this far from the region. For
|
|
185
|
-
example, ``1`` will not include the last merges from neighboring
|
|
186
|
-
tensors in the region defined by ``tids``.
|
|
187
|
-
max_distance : None or int, optional
|
|
188
|
-
Terminate branches once they reach this far away. If ``None`` there
|
|
189
|
-
is no limit,
|
|
190
|
-
include : sequence of str, optional
|
|
191
|
-
If specified, only ``tids`` specified here can be part of the tree.
|
|
192
|
-
exclude : sequence of str, optional
|
|
193
|
-
If specified, ``tids`` specified here cannot be part of the tree.
|
|
194
|
-
ndim_sort : {'min', 'max', 'none'}, optional
|
|
195
|
-
When expanding the tree, how to choose what nodes to expand to
|
|
196
|
-
next, once connectivity to the current surface has been taken into
|
|
197
|
-
account.
|
|
198
|
-
distance_sort : {'min', 'max', 'none'}, optional
|
|
199
|
-
When expanding the tree, how to choose what nodes to expand to
|
|
200
|
-
next, once connectivity to the current surface has been taken into
|
|
201
|
-
account.
|
|
202
|
-
weight_bonds : bool, optional
|
|
203
|
-
Whether to weight the 'connection' of a candidate tensor to expand
|
|
204
|
-
out to using bond size as well as number of bonds.
|
|
205
|
-
|
|
206
|
-
Returns
|
|
207
|
-
-------
|
|
208
|
-
list[(str, str, int)]
|
|
209
|
-
The ordered list of merges, each given as tuple ``(tid1, tid2, d)``
|
|
210
|
-
indicating merge ``tid1 -> tid2`` at distance ``d``.
|
|
211
|
-
|
|
212
|
-
See Also
|
|
213
|
-
--------
|
|
214
|
-
draw_tree_span
|
|
215
|
-
"""
|
|
216
|
-
# current tensors in the tree -> we will grow this
|
|
217
|
-
region = oset(tids)
|
|
218
|
-
|
|
219
|
-
# check if we should only allow a certain set of nodes
|
|
220
|
-
if include is None:
|
|
221
|
-
include = oset(tn.tensor_map)
|
|
222
|
-
elif not isinstance(include, oset):
|
|
223
|
-
include = oset(include)
|
|
224
|
-
|
|
225
|
-
allowed = include - region
|
|
226
|
-
|
|
227
|
-
# check if we should explicitly ignore some nodes
|
|
228
|
-
if exclude is not None:
|
|
229
|
-
if not isinstance(exclude, oset):
|
|
230
|
-
exclude = oset(exclude)
|
|
231
|
-
allowed -= exclude
|
|
232
|
-
|
|
233
|
-
# possible merges of neighbors into the region
|
|
234
|
-
candidates = []
|
|
235
|
-
|
|
236
|
-
# actual merges we have performed, defining the tree
|
|
237
|
-
merges = {}
|
|
238
|
-
|
|
239
|
-
# distance to the original region
|
|
240
|
-
distances = {tid: 0 for tid in region}
|
|
241
|
-
|
|
242
|
-
# how many times (or weight) that neighbors are connected to the region
|
|
243
|
-
connectivity = collections.defaultdict(lambda: 0)
|
|
244
|
-
|
|
245
|
-
# given equal connectivity compare neighbors based on
|
|
246
|
-
# min/max distance and min/max ndim
|
|
247
|
-
distance_coeff = {"min": -1, "max": 1, "none": 0}[distance_sort]
|
|
248
|
-
ndim_coeff = {"min": -1, "max": 1, "none": 0}[ndim_sort]
|
|
249
|
-
|
|
250
|
-
def _check_candidate(tid_surface, tid_neighb):
|
|
251
|
-
"""Check the expansion of ``tid_surface`` to ``tid_neighb``."""
|
|
252
|
-
if (tid_neighb in region) or (tid_neighb not in allowed):
|
|
253
|
-
# we've already absorbed it, or we're not allowed to
|
|
254
|
-
return
|
|
255
|
-
|
|
256
|
-
if tid_neighb not in distances:
|
|
257
|
-
# defines a new spanning tree edge
|
|
258
|
-
merges[tid_neighb] = tid_surface
|
|
259
|
-
# graph distance to original region
|
|
260
|
-
new_d = distances[tid_surface] + 1
|
|
261
|
-
distances[tid_neighb] = new_d
|
|
262
|
-
if (max_distance is None) or (new_d <= max_distance):
|
|
263
|
-
candidates.append(tid_neighb)
|
|
264
|
-
|
|
265
|
-
# keep track of how connected to the current surface potential new
|
|
266
|
-
# nodes are
|
|
267
|
-
if weight_bonds:
|
|
268
|
-
connectivity[tid_neighb] += math.log2(
|
|
269
|
-
tn.tensor_map[tid_surface].bonds_size(
|
|
270
|
-
tn.tensor_map[tid_neighb]
|
|
271
|
-
)
|
|
272
|
-
)
|
|
273
|
-
else:
|
|
274
|
-
connectivity[tid_neighb] += 1
|
|
275
|
-
|
|
276
|
-
if sorter is None:
|
|
277
|
-
|
|
278
|
-
def _sorter(t):
|
|
279
|
-
# how to pick which tensor to absorb into the expanding surface
|
|
280
|
-
# here, choose the candidate that is most connected to current
|
|
281
|
-
# surface, breaking ties with how close it is to the original
|
|
282
|
-
# tree, and how many dimensions it has
|
|
283
|
-
return (
|
|
284
|
-
connectivity[t],
|
|
285
|
-
ndim_coeff * tn.tensor_map[t].ndim,
|
|
286
|
-
distance_coeff * distances[t],
|
|
287
|
-
)
|
|
288
|
-
else:
|
|
289
|
-
_sorter = functools.partial(
|
|
290
|
-
sorter, tn=tn, distances=distances, connectivity=connectivity
|
|
291
|
-
)
|
|
292
|
-
|
|
293
|
-
# setup the initial region and candidate nodes to expand to
|
|
294
|
-
for tid_surface in region:
|
|
295
|
-
for tid_next in tn._get_neighbor_tids(tid_surface):
|
|
296
|
-
_check_candidate(tid_surface, tid_next)
|
|
297
|
-
|
|
298
|
-
# generate the sequence of tensor merges
|
|
299
|
-
seq = []
|
|
300
|
-
while candidates:
|
|
301
|
-
# choose the *highest* scoring candidate
|
|
302
|
-
candidates.sort(key=_sorter)
|
|
303
|
-
tid_surface = candidates.pop()
|
|
304
|
-
region.add(tid_surface)
|
|
305
|
-
|
|
306
|
-
if distances[tid_surface] > min_distance:
|
|
307
|
-
# checking distance allows the innermost merges to be ignored,
|
|
308
|
-
# for example, to contract an environment around a region
|
|
309
|
-
seq.append(
|
|
310
|
-
(tid_surface, merges[tid_surface], distances[tid_surface])
|
|
311
|
-
)
|
|
312
|
-
|
|
313
|
-
# check all the neighbors of the tensor we've just expanded to
|
|
314
|
-
for tid_next in tn._get_neighbor_tids(tid_surface):
|
|
315
|
-
_check_candidate(tid_surface, tid_next)
|
|
316
|
-
|
|
317
|
-
if inwards:
|
|
318
|
-
# make the sequence of merges flow inwards
|
|
319
|
-
seq.reverse()
|
|
320
|
-
|
|
321
|
-
return seq
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
def get_path_between_tids(tn, tida, tidb):
|
|
325
|
-
"""Find a shortest path between ``tida`` and ``tidb`` in this tensor
|
|
326
|
-
network. Returns a ``NetworkPath`` if a path is found, otherwise ``None``.
|
|
327
|
-
|
|
328
|
-
Currently ignores dangling and hyper indices.
|
|
329
|
-
|
|
330
|
-
Parameters
|
|
331
|
-
----------
|
|
332
|
-
tn : TensorNetwork
|
|
333
|
-
The tensor network to find a path in.
|
|
334
|
-
tida : int
|
|
335
|
-
The tensor id to start from.
|
|
336
|
-
tidb : int
|
|
337
|
-
The tensor id to end at.
|
|
338
|
-
|
|
339
|
-
Returns
|
|
340
|
-
-------
|
|
341
|
-
NetworkPath or None
|
|
342
|
-
"""
|
|
343
|
-
# expand from both points
|
|
344
|
-
path_a0 = NetworkPath((tida,))
|
|
345
|
-
path_b0 = NetworkPath((tidb,))
|
|
346
|
-
queue_a = collections.deque((path_a0,))
|
|
347
|
-
queue_b = collections.deque((path_b0,))
|
|
348
|
-
# track ends of path so we identify when they meet
|
|
349
|
-
# also acts a store for shortest path to that point
|
|
350
|
-
ends_a = {tida: path_a0}
|
|
351
|
-
ends_b = {tidb: path_b0}
|
|
352
|
-
|
|
353
|
-
while queue_a or queue_b:
|
|
354
|
-
for queue, ends_this, ends_other in [
|
|
355
|
-
(queue_a, ends_a, ends_b),
|
|
356
|
-
(queue_b, ends_b, ends_a),
|
|
357
|
-
]:
|
|
358
|
-
if not queue:
|
|
359
|
-
# no possible path
|
|
360
|
-
return None
|
|
361
|
-
|
|
362
|
-
path = queue.popleft()
|
|
363
|
-
|
|
364
|
-
# get the tensor at the current end of the path
|
|
365
|
-
last_tid = path.tids[-1]
|
|
366
|
-
t = tn.tensor_map[last_tid]
|
|
367
|
-
|
|
368
|
-
# check ways we could extend it
|
|
369
|
-
for next_ind in t.inds:
|
|
370
|
-
if next_ind in path:
|
|
371
|
-
# don't go back on ourselves
|
|
372
|
-
continue
|
|
373
|
-
|
|
374
|
-
tids = tn.ind_map[next_ind]
|
|
375
|
-
if len(tids) != 2:
|
|
376
|
-
# ignore dangling and hyper indices
|
|
377
|
-
continue
|
|
378
|
-
|
|
379
|
-
next_tid = next(tid for tid in tids if tid != last_tid)
|
|
380
|
-
|
|
381
|
-
if next_tid in ends_this:
|
|
382
|
-
# already been here in shorter or equal path
|
|
383
|
-
continue
|
|
384
|
-
|
|
385
|
-
if next_tid in ends_other:
|
|
386
|
-
# found valid complete path!
|
|
387
|
-
other_path = ends_other[next_tid]
|
|
388
|
-
|
|
389
|
-
# want path to go from tida -> tidb
|
|
390
|
-
if queue is queue_a:
|
|
391
|
-
return NetworkPath(
|
|
392
|
-
tids=path.tids + other_path.tids[::-1],
|
|
393
|
-
inds=path.inds
|
|
394
|
-
+ (next_ind,)
|
|
395
|
-
+ other_path.inds[::-1],
|
|
396
|
-
)
|
|
397
|
-
else:
|
|
398
|
-
return NetworkPath(
|
|
399
|
-
tids=other_path.tids + path.tids[::-1],
|
|
400
|
-
inds=other_path.inds
|
|
401
|
-
+ (next_ind,)
|
|
402
|
-
+ path.inds[::-1],
|
|
403
|
-
)
|
|
404
|
-
|
|
405
|
-
# valid partial path
|
|
406
|
-
next_path = path.extend(next_ind, next_tid)
|
|
407
|
-
ends_this[next_tid] = next_path
|
|
408
|
-
queue.append(next_path)
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
def gen_all_paths_between_tids(tn, tida, tidb):
|
|
412
|
-
"""Generate all shortest paths between ``tida`` and ``tidb`` in this
|
|
413
|
-
tensor network. Returns a generator of ``NetworkPath`` objects, ignores
|
|
414
|
-
dangling and hyper indices currently.
|
|
415
|
-
|
|
416
|
-
Parameters
|
|
417
|
-
----------
|
|
418
|
-
tn : TensorNetwork
|
|
419
|
-
The tensor network to find paths in.
|
|
420
|
-
tida : int
|
|
421
|
-
The tensor id to start from.
|
|
422
|
-
tidb : int
|
|
423
|
-
The tensor id to end at.
|
|
424
|
-
|
|
425
|
-
Yields
|
|
426
|
-
------
|
|
427
|
-
NetworkPath
|
|
428
|
-
"""
|
|
429
|
-
# map of only those neighbors which contribute to shortest paths
|
|
430
|
-
predecessors = {}
|
|
431
|
-
distances = {tidb: 0}
|
|
432
|
-
queue = collections.deque([(tidb, 0)])
|
|
433
|
-
found_start = False
|
|
434
|
-
|
|
435
|
-
while queue:
|
|
436
|
-
# get possible path extension, BFS
|
|
437
|
-
last_tid, length = queue.popleft()
|
|
438
|
-
|
|
439
|
-
# check ways we could extend it
|
|
440
|
-
t = tn.tensor_map[last_tid]
|
|
441
|
-
for next_ind in t.inds:
|
|
442
|
-
tids = tn.ind_map[next_ind]
|
|
443
|
-
if len(tids) != 2:
|
|
444
|
-
# ignore dangling and hyper indices
|
|
445
|
-
continue
|
|
446
|
-
next_tid = next(tid for tid in tids if tid != last_tid)
|
|
447
|
-
|
|
448
|
-
if next_tid == tida:
|
|
449
|
-
found_start = True
|
|
450
|
-
|
|
451
|
-
d = distances.get(next_tid, None)
|
|
452
|
-
if d is None:
|
|
453
|
-
# first time reaching this node
|
|
454
|
-
distances[next_tid] = length + 1
|
|
455
|
-
predecessors[next_tid] = [(last_tid, next_ind)]
|
|
456
|
-
if not found_start:
|
|
457
|
-
# BFS search, so once we have found target, all
|
|
458
|
-
# possible paths will be in the queue already
|
|
459
|
-
queue.append((next_tid, length + 1))
|
|
460
|
-
elif length < d:
|
|
461
|
-
# another shortest path, just update predecessors
|
|
462
|
-
# since extentions handled by case above
|
|
463
|
-
predecessors[next_tid].append((last_tid, next_ind))
|
|
464
|
-
|
|
465
|
-
# back track to find all paths
|
|
466
|
-
queue = [NetworkPath([tida])]
|
|
467
|
-
while queue:
|
|
468
|
-
# this part can be DFS
|
|
469
|
-
path = queue.pop()
|
|
470
|
-
last_tid = path.tids[-1]
|
|
471
|
-
for next_tid, next_ind in predecessors[last_tid]:
|
|
472
|
-
new_path = path.extend(next_ind, next_tid)
|
|
473
|
-
if next_tid == tidb:
|
|
474
|
-
# reached the start
|
|
475
|
-
yield new_path
|
|
476
|
-
else:
|
|
477
|
-
queue.append(new_path)
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
def gen_paths_loops(
|
|
481
|
-
tn,
|
|
482
|
-
max_loop_length=None,
|
|
483
|
-
intersect=False,
|
|
484
|
-
tids=None,
|
|
485
|
-
inds=None,
|
|
486
|
-
paths=None,
|
|
487
|
-
):
|
|
488
|
-
"""Generate all paths, up to a specified length, that represent loops in
|
|
489
|
-
this tensor network. Unlike ``gen_loops`` this function will yield a
|
|
490
|
-
`NetworkPath` objects, allowing one to differentiate between e.g. a double
|
|
491
|
-
loop and a 'figure of eight' loop. Dangling and hyper indices are ignored.
|
|
492
|
-
|
|
493
|
-
Currently ignores dangling and hyper indices.
|
|
494
|
-
|
|
495
|
-
Parameters
|
|
496
|
-
----------
|
|
497
|
-
tn : TensorNetwork
|
|
498
|
-
The tensor network to find loops in.
|
|
499
|
-
max_loop_length : None or int
|
|
500
|
-
Set the maximum number of indices that can appear in a loop. If
|
|
501
|
-
``None``, wait until any loop is found and set that as the maximum
|
|
502
|
-
length.
|
|
503
|
-
intersect : bool, optional
|
|
504
|
-
Whether to allow self-intersecting loops.
|
|
505
|
-
tids : None or sequence of int, optional
|
|
506
|
-
If supplied, only consider loops containing one of these tensor ids.
|
|
507
|
-
inds : None or sequence of str, optional
|
|
508
|
-
If supplied, only consider loops containing one of these indices.
|
|
509
|
-
paths : None or sequence of NetworkPath, optional
|
|
510
|
-
If supplied, only consider loops starting from these paths.
|
|
511
|
-
|
|
512
|
-
Yields
|
|
513
|
-
------
|
|
514
|
-
NetworkPath
|
|
515
|
-
|
|
516
|
-
See Also
|
|
517
|
-
--------
|
|
518
|
-
gen_loops, gen_inds_connected
|
|
519
|
-
"""
|
|
520
|
-
queue = collections.deque()
|
|
521
|
-
|
|
522
|
-
if isinstance(tids, int):
|
|
523
|
-
# allow single tid to be passed
|
|
524
|
-
tids = (tids,)
|
|
525
|
-
if isinstance(inds, str):
|
|
526
|
-
# allow single index to be passed
|
|
527
|
-
inds = (inds,)
|
|
528
|
-
|
|
529
|
-
if (tids is None) and (inds is None) and (paths is None):
|
|
530
|
-
# default to finding loops everywhere
|
|
531
|
-
inds = tn.ind_map
|
|
532
|
-
|
|
533
|
-
if tids is not None:
|
|
534
|
-
# generate loops starting at any of these tids
|
|
535
|
-
for tid in tids:
|
|
536
|
-
queue.append(NetworkPath([tid]))
|
|
537
|
-
|
|
538
|
-
if inds is not None:
|
|
539
|
-
# generate loops passing through any of these indices
|
|
540
|
-
for ind in inds:
|
|
541
|
-
tids = tn.ind_map[ind]
|
|
542
|
-
if len(tids) != 2:
|
|
543
|
-
# ignore dangling and hyper indices
|
|
544
|
-
continue
|
|
545
|
-
|
|
546
|
-
tida, tidb = tids
|
|
547
|
-
# (only need one direction)
|
|
548
|
-
queue.append(NetworkPath((tida, tidb), (ind,)))
|
|
549
|
-
|
|
550
|
-
if paths is not None:
|
|
551
|
-
# generate loops starting from these paths
|
|
552
|
-
for path in paths:
|
|
553
|
-
if not isinstance(path, NetworkPath):
|
|
554
|
-
path = NetworkPath.from_sequence(path)
|
|
555
|
-
queue.append(path)
|
|
556
|
-
|
|
557
|
-
# cache index neighbor lookups for speed
|
|
558
|
-
neighbormap = {}
|
|
559
|
-
seen = set()
|
|
560
|
-
|
|
561
|
-
while queue:
|
|
562
|
-
path = queue.popleft()
|
|
563
|
-
|
|
564
|
-
if intersect:
|
|
565
|
-
# might have formed a closed loop, then it matter where we are
|
|
566
|
-
# continuing from, so key on both ends
|
|
567
|
-
search_key = (path.key, frozenset((path.tids[0], path.tids[-1])))
|
|
568
|
-
else:
|
|
569
|
-
# set of tids and inds is unique for non-intersecting loops
|
|
570
|
-
search_key = path.key
|
|
571
|
-
|
|
572
|
-
if search_key in seen:
|
|
573
|
-
continue
|
|
574
|
-
seen.add(search_key)
|
|
575
|
-
|
|
576
|
-
last_tid = path.tids[-1]
|
|
577
|
-
try:
|
|
578
|
-
last_ind = path.inds[-1]
|
|
579
|
-
except IndexError:
|
|
580
|
-
# path is a single tid, no indices
|
|
581
|
-
last_ind = None
|
|
582
|
-
|
|
583
|
-
try:
|
|
584
|
-
expansions = neighbormap[last_ind, last_tid]
|
|
585
|
-
except KeyError:
|
|
586
|
-
# check which ways we can continue this path
|
|
587
|
-
possible_inds = tn.tensor_map[last_tid].inds
|
|
588
|
-
expansions = []
|
|
589
|
-
for next_ind in possible_inds:
|
|
590
|
-
# don't come back the way we came
|
|
591
|
-
if next_ind != last_ind:
|
|
592
|
-
next_ind_tids = tn.ind_map[next_ind]
|
|
593
|
-
# only consider normal bonds
|
|
594
|
-
if len(next_ind_tids) == 2:
|
|
595
|
-
# get the tid which isn't the direction we came
|
|
596
|
-
next_tid, next_tid_b = next_ind_tids
|
|
597
|
-
if next_tid == last_tid:
|
|
598
|
-
next_tid = next_tid_b
|
|
599
|
-
expansions.append((next_ind, next_tid))
|
|
600
|
-
|
|
601
|
-
# cache this lookup
|
|
602
|
-
neighbormap[last_ind, last_tid] = expansions
|
|
603
|
-
|
|
604
|
-
continue_search = (max_loop_length is None) or (
|
|
605
|
-
len(path) < max_loop_length - 1
|
|
606
|
-
)
|
|
607
|
-
|
|
608
|
-
for next_ind, next_tid in expansions:
|
|
609
|
-
if next_ind in path:
|
|
610
|
-
# can't ever double up on indices
|
|
611
|
-
continue
|
|
612
|
-
|
|
613
|
-
if next_tid == path.tids[0]:
|
|
614
|
-
# finished a loop!
|
|
615
|
-
|
|
616
|
-
loop = path.extend(next_ind, next_tid)
|
|
617
|
-
if loop.key not in seen:
|
|
618
|
-
seen.add(loop.key)
|
|
619
|
-
if max_loop_length is None:
|
|
620
|
-
max_loop_length = len(loop)
|
|
621
|
-
|
|
622
|
-
# normalize the loop to be consistent across searches
|
|
623
|
-
# yield _normalize_loop(loop)
|
|
624
|
-
yield loop
|
|
625
|
-
|
|
626
|
-
if continue_search and (intersect or next_tid not in path):
|
|
627
|
-
# valid candidate extension!
|
|
628
|
-
# -> we can double up on nodes only if intersecting
|
|
629
|
-
queue.append(path.extend(next_ind, next_tid))
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
def gen_regions(tn, max_region_size=None, tids=None, which="all"):
    """Generate sets of tids forming 'regions': subgraphs in which every
    node is connected to at least two other nodes of the region, i.e.
    2-degree connected subgraphs.

    Parameters
    ----------
    tn : TensorNetwork
        The tensor network to find regions in.
    max_region_size : None or int
        The largest number of tensors allowed in a region. If ``None``,
        it is set automatically to the size of the first valid region
        found.
    tids : None or sequence of int, optional
        If supplied, only yield regions containing these tids, see
        ``which``.
    which : {'all', 'any'}, optional
        Only if ``tids`` is specified, this determines how to filter
        regions. If 'all', only yield regions containing *all* of the
        tids in ``tids``, if 'any', yield regions containing *any* of
        the tids in ``tids``.

    Yields
    ------
    tuple[int]
    """
    if tids is None:
        # no restriction -> seed a search from every node
        tids = tn.tensor_map.keys()
        which = "any"
    elif isinstance(tids, int):
        # allow a single tid to be passed directly
        tids = (tids,)

    if which == "all":
        # one seed region containing all the target tids
        queue = collections.deque((frozenset(tids),))
    elif which == "any":
        # a separate seed region per target tid
        queue = collections.deque(frozenset([tid]) for tid in tids)
    else:
        raise ValueError("`which` must be 'all' or 'any'.")

    # cache neighbor lookups, and track regions already enqueued
    neighbor_cache = {}
    seen = set()

    while queue:
        region = queue.popleft()

        # bond_count[tid]: (double counted) number of intra-region bonds
        bond_count = collections.Counter()
        frontier = set()

        for tid in region:
            if tid in neighbor_cache:
                neighbors = neighbor_cache[tid]
            else:
                neighbors = tn._get_neighbor_tids([tid])
                neighbor_cache[tid] = neighbors

            for ntid in neighbors:
                if ntid in region:
                    # internal bond -> counted from both endpoints
                    bond_count[tid] += 1
                    bond_count[ntid] += 1
                else:
                    # candidate node for growing the region
                    frontier.add(ntid)

        if bond_count and all(c >= 4 for c in bond_count.values()):
            # valid region: no node hangs by a single bond only (bonds
            # are double counted above, so 4 means 2-connected)
            if max_region_size is None:
                # first hit fixes the automatic maximum size
                max_region_size = len(region)
            yield tuple(sorted(region))

        if (max_region_size is None) or (len(region) < max_region_size):
            # keep searching: try growing by each frontier node
            for ntid in frontier:
                bigger = region | {ntid}
                if bigger not in seen:
                    # a region is reachable many ways -> enqueue once
                    queue.append(bigger)
                    seen.add(bigger)
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
def gen_loops(tn, max_loop_length=None):
    """Generate sequences of tids that represent loops in the TN.

    Parameters
    ----------
    max_loop_length : None or int
        The largest number of tensors allowed in a loop. If ``None``,
        it is set to the length of the first loop found.

    Yields
    ------
    tuple[int]

    See Also
    --------
    gen_paths_loops
    """
    from cotengra.core import get_hypergraph

    # delegate the loop search to cotengra's hypergraph implementation
    hyper_inputs = {tid: t.inds for tid, t in tn.tensor_map.items()}
    hypergraph = get_hypergraph(hyper_inputs, accel="auto")
    return hypergraph.compute_loops(max_loop_length=max_loop_length)
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
def gen_inds_connected(tn, max_length):
    """Generate all connected index 'patches' of size up to
    ``max_length``.

    Parameters
    ----------
    max_length : int
        The maximum number of indices in the patch.

    Yields
    ------
    tuple[str]

    See Also
    --------
    gen_paths_loops
    """
    # seed with every single-index patch (singles themselves are not
    # yielded, only their extensions)
    stack = [(ix,) for ix in tn.ind_map]
    seen = {frozenset(patch) for patch in stack}
    while stack:
        patch = stack.pop()
        if len(patch) == max_length:
            # already at maximum size -> don't extend further
            continue
        for ix in tn._get_neighbor_inds(patch):
            bigger = patch + (ix,)
            key = frozenset(bigger)
            if key not in seen:
                # a patch not encountered before
                yield bigger
                seen.add(key)
                stack.append(bigger)
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
def tids_are_connected(tn, tids):
    """Check whether nodes ``tids`` are connected.

    Parameters
    ----------
    tn : TensorNetwork
        The tensor network to check.
    tids : sequence of int
        Nodes to check.

    Returns
    -------
    bool
    """
    indices = range(len(tids))
    # union-find style map: node index -> representative group index
    group_of = dict(zip(indices, indices))
    # each entry is (members, neighborhood) for one growing blob
    blobs = [(oset([tid]), tn._get_neighbor_tids(tid)) for tid in tids]
    for i, j in itertools.combinations(indices, 2):
        ri = group_of.get(i, i)
        rj = group_of.get(j, j)

        if blobs[ri][0] & blobs[rj][1]:
            # blob j's neighborhood touches blob i's members -> merge j
            # into i, pooling both the members and the neighborhoods
            group_of[rj] = ri
            blobs[ri][0].update(blobs[rj][0])
            blobs[ri][1].update(blobs[rj][1])

    # connected iff everything collapsed into a single group
    return len(set(group_of.values())) == 1
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
def compute_shortest_distances(tn, tids=None, exclude_inds=()):
    """Compute the minimum graph distances between all or some nodes
    ``tids``.

    Parameters
    ----------
    tn : TensorNetwork
        The tensor network to compute distances in.
    tids : None or sequence of int, optional
        If supplied, only compute distances between these nodes.
    exclude_inds : sequence of str, optional
        Exclude these indices when computing distances.

    Returns
    -------
    dict[tuple[int, int], int]
    """
    if tids is None:
        tids = tn.tensor_map
    else:
        tids = set(tids)

    # visitors[tid]: set of target tids whose 'wavefront' reached tid
    visitors = collections.defaultdict(frozenset)
    for tid in tids:
        # each target starts having visited only itself
        visitors[tid] = frozenset([tid])

    distances = {}
    # total number of pairs we might resolve
    num_pairs = math.comb(len(tids), 2)

    for d in itertools.count(1):
        any_change = False
        prev_visitors = visitors.copy()

        # only the region touched so far can spread visitors
        for tid in tuple(visitors):
            # each node broadcasts its current visitor set to all of
            # its neighbors
            spreading = prev_visitors[tid]
            for next_tid in tn._get_neighbor_tids(tid, exclude_inds):
                visitors[next_tid] |= spreading

        for tid in tuple(visitors):
            # newly arrived visitors correspond to shortest paths of
            # exactly length d
            for diff_tid in visitors[tid] - prev_visitors[tid]:
                any_change = True
                if (tid in tids) and (diff_tid in tids) and (tid < diff_tid):
                    distances[tid, diff_tid] = d

        if (len(distances) == num_pairs) or (not any_change):
            # all pair combinations have been computed, or everything
            # converged, presumably due to disconnected subgraphs
            break

    return distances
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
def compute_hierarchical_linkage(
    tn,
    tids=None,
    method="weighted",
    optimal_ordering=True,
    exclude_inds=(),
):
    """Compute a scipy hierarchical clustering linkage matrix for
    ``tids`` (by default all tensors), based on graph distances in
    ``tn``.
    """
    from scipy.cluster import hierarchy

    if tids is None:
        tids = tn.tensor_map

    try:
        # fast path: let cotengra compute all pairwise distances
        from cotengra import get_hypergraph

        hyper_graph = get_hypergraph(
            {tid: t.inds for tid, t in tn.tensor_map.items()},
            accel="auto",
        )
        for ix in exclude_inds:
            hyper_graph.remove_edge(ix)
        condensed = hyper_graph.all_shortest_distances_condensed(tuple(tids))
        return hierarchy.linkage(
            condensed, method=method, optimal_ordering=optimal_ordering
        )
    except ImportError:
        # cotengra unavailable -> fall back to the pure python version
        pass

    distances = tn.compute_shortest_distances(tids, exclude_inds)

    # stand-in 'infinite' distance for disconnected pairs
    dinf = 10 * tn.num_tensors
    condensed = [
        distances.get(tuple(sorted((i, j))), dinf)
        for i, j in itertools.combinations(tids, 2)
    ]

    return hierarchy.linkage(
        condensed, method=method, optimal_ordering=optimal_ordering
    )
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
def compute_hierarchical_ssa_path(
    tn,
    tids=None,
    method="weighted",
    optimal_ordering=True,
    exclude_inds=(),
    are_sorted=False,
    linkage=None,
):
    """Compute a hierarchical grouping of ``tids``, as a ``ssa_path``."""
    if linkage is None:
        linkage = tn.compute_hierarchical_linkage(
            tids,
            method=method,
            exclude_inds=exclude_inds,
            optimal_ordering=optimal_ordering,
        )

    # linkage rows reference leaves by their position in sorted order
    raw_pairs = ((int(row[0]), int(row[1])) for row in linkage)
    if are_sorted:
        # caller guarantees tids already sorted -> positions match
        return tuple(raw_pairs)

    if tids is None:
        tids = tn.tensor_map
    # translate sorted positions back to the caller's original ordering
    given_idx = {tid: i for i, tid in enumerate(tids)}
    sorted_to_given_idx = {
        i: given_idx[tid] for i, tid in enumerate(sorted(tids))
    }
    return tuple(
        (sorted_to_given_idx.get(a, a), sorted_to_given_idx.get(b, b))
        for a, b in raw_pairs
    )
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
def compute_hierarchical_ordering(
    tn,
    tids=None,
    method="weighted",
    optimal_ordering=True,
    exclude_inds=(),
    linkage=None,
):
    """Compute a hierarchical ordering of ``tids``."""
    from scipy.cluster import hierarchy

    if tids is None:
        tids = list(tn.tensor_map)

    if linkage is None:
        linkage = tn.compute_hierarchical_linkage(
            tids,
            method=method,
            exclude_inds=exclude_inds,
            optimal_ordering=optimal_ordering,
        )

    # leaves_list yields leaf positions w.r.t. sorted tids -> map back
    position_to_tid = dict(enumerate(sorted(tids)))
    return tuple(
        position_to_tid[leaf] for leaf in hierarchy.leaves_list(linkage)
    )
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
def compute_hierarchical_grouping(
    tn,
    max_group_size,
    tids=None,
    method="weighted",
    optimal_ordering=True,
    exclude_inds=(),
    linkage=None,
):
    """Group ``tids`` (by default, all tensors) into groups of size
    ``max_group_size`` or less, using a hierarchical clustering.
    """
    if tids is None:
        tids = list(tn.tensor_map)

    tids = sorted(tids)

    if linkage is None:
        linkage = tn.compute_hierarchical_linkage(
            tids,
            method=method,
            exclude_inds=exclude_inds,
            optimal_ordering=optimal_ordering,
        )

    ssa_path = tn.compute_hierarchical_ssa_path(
        tids=tids,
        method=method,
        exclude_inds=exclude_inds,
        are_sorted=True,
        linkage=linkage,
    )

    # walk the contraction path, merging groups while they stay small
    groups = {leaf: (tid,) for leaf, tid in enumerate(tids)}
    ssa = len(tids) - 1
    for i, j in ssa_path:
        ssa += 1

        if (i not in groups) or (j not in groups):
            # a child was already merged away (it grew too big)
            continue

        if len(groups[i]) + len(groups[j]) > max_group_size:
            # merging would exceed the size limit -> skip this merge
            continue

        # merge the two child groups under the new ssa id
        groups[ssa] = groups.pop(i) + groups.pop(j)

    # order the groups by where their members sit in the dendrogram
    # leaf ordering
    ordering = tn.compute_hierarchical_ordering(
        tids=tids,
        method=method,
        exclude_inds=exclude_inds,
        optimal_ordering=optimal_ordering,
        linkage=linkage,
    )
    leaf_score = {tid: i for i, tid in enumerate(ordering)}
    ordered_groups = sorted(
        groups.items(),
        key=lambda kv: sum(map(leaf_score.__getitem__, kv[1])),
    )

    return tuple(kv[1] for kv in ordered_groups)
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
def compute_centralities(tn):
    """Compute a simple centrality measure for each tensor in the
    network. The values go from 0 to 1, with 1 being the most central
    tensor.

    Parameters
    ----------
    tn : TensorNetwork
        The tensor network to compute centralities for.

    Returns
    -------
    dict[int, float]
    """
    import cotengra as ctg

    # build a hypergraph view and use cotengra's centrality measure
    hyper_graph = ctg.get_hypergraph(
        {tid: t.inds for tid, t in tn.tensor_map.items()}
    )
    return hyper_graph.simple_centrality()
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
def most_central_tid(tn):
    """Find the most central tensor in the network."""
    scores = tn.compute_centralities()
    # max over (score, tid) pairs -> score ties broken by larger tid
    _, best_tid = max((score, tid) for tid, score in scores.items())
    return best_tid
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
def least_central_tid(tn):
    """Find the least central tensor in the network."""
    scores = tn.compute_centralities()
    # min over (score, tid) pairs -> score ties broken by smaller tid
    _, worst_tid = min((score, tid) for tid, score in scores.items())
    return worst_tid
|