Trajectree 0.0.1-py3-none-any.whl → 0.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +0 -3
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +3 -3
- trajectree/fock_optics/utils.py +6 -6
- trajectree/trajectory.py +2 -2
- {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/METADATA +2 -3
- trajectree-0.0.2.dist-info/RECORD +16 -0
- trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
- trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
- trajectree/quimb/docs/conf.py +0 -158
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
- trajectree/quimb/quimb/__init__.py +0 -507
- trajectree/quimb/quimb/calc.py +0 -1491
- trajectree/quimb/quimb/core.py +0 -2279
- trajectree/quimb/quimb/evo.py +0 -712
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +0 -129
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
- trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
- trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
- trajectree/quimb/quimb/experimental/schematic.py +0 -7
- trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
- trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
- trajectree/quimb/quimb/gates.py +0 -36
- trajectree/quimb/quimb/gen/__init__.py +0 -2
- trajectree/quimb/quimb/gen/operators.py +0 -1167
- trajectree/quimb/quimb/gen/rand.py +0 -713
- trajectree/quimb/quimb/gen/states.py +0 -479
- trajectree/quimb/quimb/linalg/__init__.py +0 -6
- trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
- trajectree/quimb/quimb/linalg/autoblock.py +0 -258
- trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
- trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
- trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
- trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
- trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
- trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
- trajectree/quimb/quimb/schematic.py +0 -1518
- trajectree/quimb/quimb/tensor/__init__.py +0 -401
- trajectree/quimb/quimb/tensor/array_ops.py +0 -610
- trajectree/quimb/quimb/tensor/circuit.py +0 -4824
- trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
- trajectree/quimb/quimb/tensor/contraction.py +0 -336
- trajectree/quimb/quimb/tensor/decomp.py +0 -1255
- trajectree/quimb/quimb/tensor/drawing.py +0 -1646
- trajectree/quimb/quimb/tensor/fitting.py +0 -385
- trajectree/quimb/quimb/tensor/geometry.py +0 -583
- trajectree/quimb/quimb/tensor/interface.py +0 -114
- trajectree/quimb/quimb/tensor/networking.py +0 -1058
- trajectree/quimb/quimb/tensor/optimize.py +0 -1818
- trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
- trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
- trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
- trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
- trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
- trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
- trajectree/quimb/quimb/utils.py +0 -892
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +0 -501
- trajectree/quimb/tests/test_calc.py +0 -788
- trajectree/quimb/tests/test_core.py +0 -847
- trajectree/quimb/tests/test_evo.py +0 -565
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +0 -361
- trajectree/quimb/tests/test_gen/test_rand.py +0 -296
- trajectree/quimb/tests/test_gen/test_states.py +0 -261
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
- trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
- trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
- trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
- trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
- trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
- trajectree/quimb/tests/test_utils.py +0 -85
- trajectree-0.0.1.dist-info/RECORD +0 -126
- {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/WHEEL +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.2.dist-info}/top_level.txt +0 -0
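A summary like the one above can be reproduced locally from the two published wheels. As a rough sketch (this helper is not part of trajectree or the registry tooling; the download directory, helper name, and reliance on only the Python standard library are assumptions), one could fetch both wheels with "pip download trajectree==0.0.1 --no-deps -d wheels/" and "pip download trajectree==0.0.2 --no-deps -d wheels/", then compare their contents:

# Hypothetical helper, not part of trajectree or the registry diff service:
# compare two locally downloaded wheels using only the standard library.
import difflib
import zipfile


def diff_wheels(old_whl: str, new_whl: str) -> None:
    """Print added/removed member names and a unified diff of shared text files."""
    with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
        old_names, new_names = set(old.namelist()), set(new.namelist())

        # files present only in one of the two wheels
        for name in sorted(old_names - new_names):
            print(f"removed: {name}")
        for name in sorted(new_names - old_names):
            print(f"added:   {name}")

        # line-by-line diff of files present in both
        for name in sorted(old_names & new_names):
            a = old.read(name).decode("utf-8", errors="replace").splitlines()
            b = new.read(name).decode("utf-8", errors="replace").splitlines()
            # lineterm="" keeps unified_diff from emitting extra blank lines
            for line in difflib.unified_diff(
                a, b, fromfile=name, tofile=name, lineterm=""
            ):
                print(line)


# Illustrative usage (paths assume the pip download commands above):
# diff_wheels("wheels/trajectree-0.0.1-py3-none-any.whl",
#             "wheels/trajectree-0.0.2-py3-none-any.whl")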
|
@@ -1,3237 +0,0 @@
|
|
|
1
|
-
"""Classes and algorithms related to arbitrary geometry tensor networks."""
|
|
2
|
-
"""Modified by Ansh Singal to work with qudit simulations"""
|
|
3
|
-
|
|
4
|
-
import functools
|
|
5
|
-
from operator import add, mul
|
|
6
|
-
|
|
7
|
-
from autoray import dag, do
|
|
8
|
-
|
|
9
|
-
from ..utils import check_opt, deprecated, ensure_dict
|
|
10
|
-
from ..utils import progbar as Progbar
|
|
11
|
-
from .contraction import get_symbol
|
|
12
|
-
from .tensor_core import (
|
|
13
|
-
TensorNetwork,
|
|
14
|
-
oset,
|
|
15
|
-
rand_uuid,
|
|
16
|
-
tags_to_oset,
|
|
17
|
-
)
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
def get_coordinate_formatter(ndims):
|
|
21
|
-
return ",".join("{}" for _ in range(ndims))
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def prod(xs):
|
|
25
|
-
"""Product of all elements in ``xs``."""
|
|
26
|
-
return functools.reduce(mul, xs)
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
def tensor_network_align(*tns, ind_ids=None, trace=False, inplace=False):
|
|
30
|
-
r"""Align an arbitrary number of tensor networks in a stack-like geometry::
|
|
31
|
-
|
|
32
|
-
a-a-a-a-a-a-a-a-a-a-a-a-a-a-a-a-a-a
|
|
33
|
-
| | | | | | | | | | | | | | | | | | <- ind_ids[0] (defaults to 1st id)
|
|
34
|
-
b-b-b-b-b-b-b-b-b-b-b-b-b-b-b-b-b-b
|
|
35
|
-
| | | | | | | | | | | | | | | | | | <- ind_ids[1]
|
|
36
|
-
...
|
|
37
|
-
| | | | | | | | | | | | | | | | | | <- ind_ids[-2]
|
|
38
|
-
y-y-y-y-y-y-y-y-y-y-y-y-y-y-y-y-y-y
|
|
39
|
-
| | | | | | | | | | | | | | | | | | <- ind_ids[-1]
|
|
40
|
-
z-z-z-z-z-z-z-z-z-z-z-z-z-z-z-z-z-z
|
|
41
|
-
|
|
42
|
-
Parameters
|
|
43
|
-
----------
|
|
44
|
-
tns : sequence of TensorNetwork
|
|
45
|
-
The TNs to align, should be structured and either effective 'vectors'
|
|
46
|
-
(have a ``site_ind_id``) or 'operators' (have a ``up_ind_id`` and
|
|
47
|
-
``lower_ind_id``).
|
|
48
|
-
ind_ids : None, or sequence of str
|
|
49
|
-
String with format specifiers to id each level of sites with. Will be
|
|
50
|
-
automatically generated like ``(tns[0].site_ind_id, "__ind_a{}__",
|
|
51
|
-
"__ind_b{}__", ...)`` if not given.
|
|
52
|
-
inplace : bool
|
|
53
|
-
Whether to modify the input tensor networks inplace.
|
|
54
|
-
|
|
55
|
-
Returns
|
|
56
|
-
-------
|
|
57
|
-
tns_aligned : sequence of TensorNetwork
|
|
58
|
-
"""
|
|
59
|
-
if not inplace:
|
|
60
|
-
tns = [tn.copy() for tn in tns]
|
|
61
|
-
|
|
62
|
-
n = len(tns)
|
|
63
|
-
coordinate_formatter = get_coordinate_formatter(tns[0]._NDIMS)
|
|
64
|
-
|
|
65
|
-
if ind_ids is None:
|
|
66
|
-
if hasattr(tns[0], "site_ind_id"):
|
|
67
|
-
ind_ids = [tns[0].site_ind_id]
|
|
68
|
-
else:
|
|
69
|
-
ind_ids = [tns[0].lower_ind_id]
|
|
70
|
-
ind_ids.extend(
|
|
71
|
-
f"__ind_{get_symbol(i)}{coordinate_formatter}__"
|
|
72
|
-
for i in range(n - 2)
|
|
73
|
-
)
|
|
74
|
-
else:
|
|
75
|
-
ind_ids = tuple(ind_ids)
|
|
76
|
-
|
|
77
|
-
for i, tn in enumerate(tns):
|
|
78
|
-
if hasattr(tn, "site_ind_id"):
|
|
79
|
-
if i == 0:
|
|
80
|
-
tn.site_ind_id = ind_ids[i]
|
|
81
|
-
elif i == n - 1:
|
|
82
|
-
tn.site_ind_id = ind_ids[i - 1]
|
|
83
|
-
else:
|
|
84
|
-
raise ValueError(
|
|
85
|
-
"An TN 'vector' can only be aligned as the "
|
|
86
|
-
"first or last TN in a sequence."
|
|
87
|
-
)
|
|
88
|
-
|
|
89
|
-
elif hasattr(tn, "upper_ind_id") and hasattr(tn, "lower_ind_id"):
|
|
90
|
-
if i != 0:
|
|
91
|
-
tn.upper_ind_id = ind_ids[i - 1]
|
|
92
|
-
if i != n - 1:
|
|
93
|
-
tn.lower_ind_id = ind_ids[i]
|
|
94
|
-
|
|
95
|
-
else:
|
|
96
|
-
raise ValueError("Can only align vectors and operators currently.")
|
|
97
|
-
|
|
98
|
-
if trace:
|
|
99
|
-
tns[-1].lower_ind_id = tns[0].upper_ind_id
|
|
100
|
-
|
|
101
|
-
return tns
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
def tensor_network_apply_op_vec(
|
|
105
|
-
A,
|
|
106
|
-
x,
|
|
107
|
-
which_A="lower",
|
|
108
|
-
contract=False,
|
|
109
|
-
fuse_multibonds=True,
|
|
110
|
-
compress=False,
|
|
111
|
-
inplace=False,
|
|
112
|
-
inplace_A=False,
|
|
113
|
-
**compress_opts,
|
|
114
|
-
):
|
|
115
|
-
"""Apply a general a general tensor network representing an operator (has
|
|
116
|
-
``upper_ind_id`` and ``lower_ind_id``) to a tensor network representing a
|
|
117
|
-
vector (has ``site_ind_id``), by contracting each pair of tensors at each
|
|
118
|
-
site then compressing the resulting tensor network. How the compression
|
|
119
|
-
takes place is determined by the type of tensor network passed in. The
|
|
120
|
-
returned tensor network has the same site indices as ``x``, and it is
|
|
121
|
-
the ``lower_ind_id`` of ``A`` that is contracted.
|
|
122
|
-
|
|
123
|
-
This is like performing ``A.to_dense() @ x.to_dense()``, or the transpose
|
|
124
|
-
thereof, depending on the value of ``which_A``.
|
|
125
|
-
|
|
126
|
-
Parameters
|
|
127
|
-
----------
|
|
128
|
-
A : TensorNetworkGenOperator
|
|
129
|
-
The tensor network representing the operator.
|
|
130
|
-
x : TensorNetworkGenVector
|
|
131
|
-
The tensor network representing the vector.
|
|
132
|
-
which_A : {"lower", "upper"}, optional
|
|
133
|
-
Whether to contract the lower or upper indices of ``A`` with the site
|
|
134
|
-
indices of ``x``.
|
|
135
|
-
contract : bool
|
|
136
|
-
Whether to contract the tensors at each site after applying the
|
|
137
|
-
operator, yielding a single tensor at each site.
|
|
138
|
-
fuse_multibonds : bool
|
|
139
|
-
If ``contract=True``, whether to fuse any multibonds after contracting
|
|
140
|
-
the tensors at each site.
|
|
141
|
-
compress : bool
|
|
142
|
-
Whether to compress the resulting tensor network.
|
|
143
|
-
inplace : bool
|
|
144
|
-
Whether to modify ``x``, the input vector tensor network inplace.
|
|
145
|
-
inplace_A : bool
|
|
146
|
-
Whether to modify ``A``, the operator tensor network inplace.
|
|
147
|
-
compress_opts
|
|
148
|
-
Options to pass to ``tn.compress``, where ``tn`` is the resulting
|
|
149
|
-
tensor network, if ``compress=True``.
|
|
150
|
-
|
|
151
|
-
Returns
|
|
152
|
-
-------
|
|
153
|
-
TensorNetworkGenVector
|
|
154
|
-
The same type as ``x``.
|
|
155
|
-
"""
|
|
156
|
-
x = x if inplace else x.copy()
|
|
157
|
-
A = A if inplace_A else A.copy()
|
|
158
|
-
|
|
159
|
-
coordinate_formatter = get_coordinate_formatter(A._NDIMS)
|
|
160
|
-
inner_ind_id = rand_uuid() + f"{coordinate_formatter}"
|
|
161
|
-
|
|
162
|
-
if which_A == "lower":
|
|
163
|
-
# align the indices
|
|
164
|
-
#
|
|
165
|
-
# | <- upper_ind_id to be site_ind_id (outerid)
|
|
166
|
-
# -A- ...
|
|
167
|
-
# | <- lower_ind_id to be innerid
|
|
168
|
-
# :
|
|
169
|
-
# | <- site_ind_id to be innerid
|
|
170
|
-
# -x- ...
|
|
171
|
-
#
|
|
172
|
-
A.lower_ind_id = inner_ind_id
|
|
173
|
-
A.upper_ind_id = x.site_ind_id
|
|
174
|
-
elif which_A == "upper":
|
|
175
|
-
# transposed application
|
|
176
|
-
A.upper_ind_id = inner_ind_id
|
|
177
|
-
A.lower_ind_id = x.site_ind_id
|
|
178
|
-
else:
|
|
179
|
-
raise ValueError(
|
|
180
|
-
f"Invalid `which_A`: {which_A}, should be 'lower' or 'upper'."
|
|
181
|
-
)
|
|
182
|
-
|
|
183
|
-
# only want to reindex on sites that being acted on
|
|
184
|
-
sites_present_in_A = tuple(A.gen_sites_present())
|
|
185
|
-
x.reindex_sites_(inner_ind_id, where=sites_present_in_A)
|
|
186
|
-
|
|
187
|
-
# combine the tensor networks
|
|
188
|
-
x |= A
|
|
189
|
-
|
|
190
|
-
if contract:
|
|
191
|
-
# optionally contract all tensor at each site
|
|
192
|
-
for site in sites_present_in_A:
|
|
193
|
-
x ^= site
|
|
194
|
-
|
|
195
|
-
if fuse_multibonds:
|
|
196
|
-
x.fuse_multibonds_()
|
|
197
|
-
|
|
198
|
-
# Only change in the method, for when some node of the MPO does not contract with the MPS. The MPS index needs to be renamed
|
|
199
|
-
# back to the original index.
|
|
200
|
-
x = x.reindex({inner_ind_id.format(i): x.site_ind_id.format(i) for i in sites_present_in_A}, inplace=inplace)
|
|
201
|
-
|
|
202
|
-
# optionally compress
|
|
203
|
-
if compress:
|
|
204
|
-
x.compress(**compress_opts)
|
|
205
|
-
|
|
206
|
-
return x
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
def tensor_network_apply_op_op(
|
|
210
|
-
A,
|
|
211
|
-
B,
|
|
212
|
-
which_A="lower",
|
|
213
|
-
which_B="upper",
|
|
214
|
-
contract=False,
|
|
215
|
-
fuse_multibonds=True,
|
|
216
|
-
compress=False,
|
|
217
|
-
inplace=False,
|
|
218
|
-
inplace_A=False,
|
|
219
|
-
**compress_opts,
|
|
220
|
-
):
|
|
221
|
-
"""Apply the operator (has upper and lower site inds) represented by tensor
|
|
222
|
-
network ``A`` to the operator represented by tensor network ``B``. The
|
|
223
|
-
resulting tensor network has the same upper and lower indices as ``B``.
|
|
224
|
-
Optionally contract the tensors at each site, fuse any multibonds, and
|
|
225
|
-
compress the resulting tensor network.
|
|
226
|
-
|
|
227
|
-
This is like performing ``A.to_dense() @ B.to_dense()``, or various
|
|
228
|
-
combinations of tranposes thereof, depending on the values of ``which_A``
|
|
229
|
-
and ``which_B``.
|
|
230
|
-
|
|
231
|
-
Parameters
|
|
232
|
-
----------
|
|
233
|
-
A : TensorNetworkGenOperator
|
|
234
|
-
The tensor network representing the operator to apply.
|
|
235
|
-
B : TensorNetworkGenOperator
|
|
236
|
-
The tensor network representing the target operator.
|
|
237
|
-
which_A : {"lower", "upper"}, optional
|
|
238
|
-
Whether to contract the lower or upper indices of ``A``.
|
|
239
|
-
which_B : {"lower", "upper"}, optional
|
|
240
|
-
Whether to contract the lower or upper indices of ``B``.
|
|
241
|
-
contract : bool
|
|
242
|
-
Whether to contract the tensors at each site after applying the
|
|
243
|
-
operator, yielding a single tensor at each site.
|
|
244
|
-
fuse_multibonds : bool
|
|
245
|
-
If ``contract=True``, whether to fuse any multibonds after contracting
|
|
246
|
-
the tensors at each site.
|
|
247
|
-
compress : bool
|
|
248
|
-
Whether to compress the resulting tensor network.
|
|
249
|
-
inplace : bool
|
|
250
|
-
Whether to modify ``B``, the target tensor network inplace.
|
|
251
|
-
inplace_A : bool
|
|
252
|
-
Whether to modify ``A``, the applied operator tensor network inplace.
|
|
253
|
-
compress_opts
|
|
254
|
-
Options to pass to ``tn.compress``, where ``tn`` is the resulting
|
|
255
|
-
tensor network, if ``compress=True``.
|
|
256
|
-
|
|
257
|
-
Returns
|
|
258
|
-
-------
|
|
259
|
-
TensorNetworkGenOperator
|
|
260
|
-
The same type as ``B``.
|
|
261
|
-
"""
|
|
262
|
-
B = B if inplace else B.copy()
|
|
263
|
-
A = A if inplace_A else A.copy()
|
|
264
|
-
|
|
265
|
-
coordinate_formatter = get_coordinate_formatter(A._NDIMS)
|
|
266
|
-
inner_ind_id = rand_uuid() + f"{coordinate_formatter}"
|
|
267
|
-
|
|
268
|
-
# Evaluates A @ B
|
|
269
|
-
if (which_A, which_B) == ("lower", "upper"):
|
|
270
|
-
# align the indices (by default lower of A joined with upper of B
|
|
271
|
-
# which corresponds to matrix multiplication):
|
|
272
|
-
#
|
|
273
|
-
# | <- A upper_ind_id to be upper_ind_id
|
|
274
|
-
# -A- ...
|
|
275
|
-
# | <- A lower_ind_id to be innerid
|
|
276
|
-
# :
|
|
277
|
-
# | <- B upper_ind_id to be innerid
|
|
278
|
-
# -B- ...
|
|
279
|
-
# | <- B lower_ind_id to be lower_ind_id
|
|
280
|
-
#
|
|
281
|
-
|
|
282
|
-
A.lower_ind_id = inner_ind_id
|
|
283
|
-
A.upper_ind_id = B.upper_ind_id
|
|
284
|
-
# B.reindex_upper_sites_(inner_ind_id)
|
|
285
|
-
B = B.reindex({B.upper_ind_id.format(i): inner_ind_id.format(i) for i in range(0, B.L)}, inplace = False)
|
|
286
|
-
|
|
287
|
-
# Evaluates B @ A.T (You first flip B to give you A @ B.T, but when you put the upper of A to be the lower inds of B, you
|
|
288
|
-
# you are transposing the entire MPS, to give you B @ A.T).
|
|
289
|
-
elif (which_A, which_B) == ("lower", "lower"):
|
|
290
|
-
# rest are just permutations of above ...
|
|
291
|
-
A.lower_ind_id = inner_ind_id
|
|
292
|
-
A.upper_ind_id = B.lower_ind_id
|
|
293
|
-
# B.reindex_lower_sites_(inner_ind_id)
|
|
294
|
-
B = B.reindex({B.lower_ind_id.format(i): inner_ind_id.format(i) for i in range(0, B.L)}, inplace = False)
|
|
295
|
-
|
|
296
|
-
# Evaluates A.T @ B
|
|
297
|
-
elif (which_A, which_B) == ("upper", "upper"):
|
|
298
|
-
A.upper_ind_id = inner_ind_id
|
|
299
|
-
A.lower_ind_id = B.upper_ind_id
|
|
300
|
-
# B.reindex_upper_sites_(inner_ind_id)
|
|
301
|
-
B = B.reindex({B.upper_ind_id.format(i): inner_ind_id.format(i) for i in range(0, B.L)}, inplace = False)
|
|
302
|
-
|
|
303
|
-
# Evaluates B @ A (You first multiply A.T and B.T and then when you put the lower inds of A to be the lower of B, you are essentially taking
|
|
304
|
-
# the transpose of the entire thing, giving you B @ A as an output).
|
|
305
|
-
elif (which_A, which_B) == ("upper", "lower"):
|
|
306
|
-
A.upper_ind_id = inner_ind_id
|
|
307
|
-
A.lower_ind_id = B.lower_ind_id
|
|
308
|
-
# B.reindex_lower_sites_(inner_ind_id)
|
|
309
|
-
B = B.reindex({B.lower_ind_id.format(i): inner_ind_id.format(i) for i in range(0, B.L)}, inplace = False)
|
|
310
|
-
|
|
311
|
-
else:
|
|
312
|
-
raise ValueError("Invalid `which_A` and `which_B` combination.")
|
|
313
|
-
|
|
314
|
-
# combine the tensor networks
|
|
315
|
-
B |= A
|
|
316
|
-
|
|
317
|
-
if contract:
|
|
318
|
-
# optionally contract all tensor at each site
|
|
319
|
-
for site in B.gen_sites_present():
|
|
320
|
-
B ^= site
|
|
321
|
-
|
|
322
|
-
if fuse_multibonds:
|
|
323
|
-
B.fuse_multibonds_()
|
|
324
|
-
|
|
325
|
-
if compress:
|
|
326
|
-
B.compress(**compress_opts)
|
|
327
|
-
|
|
328
|
-
if which_B == "upper":
|
|
329
|
-
B = B.reindex({inner_ind_id.format(i): B.upper_ind_id.format(i) for i in B.gen_sites_present()}, inplace=inplace)
|
|
330
|
-
if which_B == "lower":
|
|
331
|
-
B = B.reindex({inner_ind_id.format(i): B.lower_ind_id.format(i) for i in B.gen_sites_present()}, inplace=inplace)
|
|
332
|
-
|
|
333
|
-
return B
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
def create_lazy_edge_map(tn, site_tags=None):
|
|
337
|
-
"""Given a tensor network, where each tensor is in exactly one group or
|
|
338
|
-
'site', compute which sites are connected to each other, without checking
|
|
339
|
-
each pair.
|
|
340
|
-
|
|
341
|
-
Parameters
|
|
342
|
-
----------
|
|
343
|
-
tn : TensorNetwork
|
|
344
|
-
The tensor network to analyze.
|
|
345
|
-
site_tags : None or sequence of str, optional
|
|
346
|
-
Which tags to consider as 'sites', by default uses ``tn.site_tags``.
|
|
347
|
-
|
|
348
|
-
Returns
|
|
349
|
-
-------
|
|
350
|
-
edges : dict[tuple[str, str], list[str]]
|
|
351
|
-
Each key is a sorted pair of tags, which are connected, and the value
|
|
352
|
-
is a list of the indices connecting them.
|
|
353
|
-
neighbors : dict[str, list[str]]
|
|
354
|
-
For each site tag, the other site tags it is connected to.
|
|
355
|
-
"""
|
|
356
|
-
if site_tags is None:
|
|
357
|
-
site_tags = set(tn.site_tags)
|
|
358
|
-
else:
|
|
359
|
-
site_tags = set(site_tags)
|
|
360
|
-
|
|
361
|
-
edges = {}
|
|
362
|
-
neighbors = {}
|
|
363
|
-
|
|
364
|
-
for ix in tn.ind_map:
|
|
365
|
-
ts = tn._inds_get(ix)
|
|
366
|
-
tags = {tag for t in ts for tag in t.tags if tag in site_tags}
|
|
367
|
-
if len(tags) >= 2:
|
|
368
|
-
# index spans multiple sites
|
|
369
|
-
i, j = tuple(sorted(tags))
|
|
370
|
-
|
|
371
|
-
if (i, j) not in edges:
|
|
372
|
-
# record indices per edge
|
|
373
|
-
edges[(i, j)] = [ix]
|
|
374
|
-
|
|
375
|
-
# add to neighbor map
|
|
376
|
-
neighbors.setdefault(i, []).append(j)
|
|
377
|
-
neighbors.setdefault(j, []).append(i)
|
|
378
|
-
else:
|
|
379
|
-
# already processed this edge
|
|
380
|
-
edges[(i, j)].append(ix)
|
|
381
|
-
|
|
382
|
-
return edges, neighbors
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
def tensor_network_ag_sum(
|
|
386
|
-
tna,
|
|
387
|
-
tnb,
|
|
388
|
-
site_tags=None,
|
|
389
|
-
negate=False,
|
|
390
|
-
compress=False,
|
|
391
|
-
inplace=False,
|
|
392
|
-
**compress_opts,
|
|
393
|
-
):
|
|
394
|
-
"""Add two tensor networks with arbitrary, but matching, geometries. They
|
|
395
|
-
should have the same site tags, with a single tensor per site and sites
|
|
396
|
-
connected by a single index only (but the name of this index can differ in
|
|
397
|
-
the two TNs).
|
|
398
|
-
|
|
399
|
-
Parameters
|
|
400
|
-
----------
|
|
401
|
-
tna : TensorNetworkGen
|
|
402
|
-
The first tensor network to add.
|
|
403
|
-
tnb : TensorNetworkGen
|
|
404
|
-
The second tensor network to add.
|
|
405
|
-
site_tags : None or sequence of str, optional
|
|
406
|
-
Which tags to consider as 'sites', by default uses ``tna.site_tags``.
|
|
407
|
-
negate : bool, optional
|
|
408
|
-
Whether to negate the second tensor network before adding.
|
|
409
|
-
compress : bool, optional
|
|
410
|
-
Whether to compress the resulting tensor network, by calling the
|
|
411
|
-
``compress`` method with the given options.
|
|
412
|
-
inplace : bool, optional
|
|
413
|
-
Whether to modify the first tensor network inplace.
|
|
414
|
-
|
|
415
|
-
Returns
|
|
416
|
-
-------
|
|
417
|
-
TensorNetworkGen
|
|
418
|
-
The resulting tensor network.
|
|
419
|
-
"""
|
|
420
|
-
tna = tna if inplace else tna.copy()
|
|
421
|
-
|
|
422
|
-
edges_a, neighbors_a = create_lazy_edge_map(tna, site_tags)
|
|
423
|
-
edges_b, _ = create_lazy_edge_map(tnb, site_tags)
|
|
424
|
-
|
|
425
|
-
reindex_map = {}
|
|
426
|
-
for (si, sj), inds in edges_a.items():
|
|
427
|
-
(ixa,) = inds
|
|
428
|
-
(ixb,) = edges_b.pop((si, sj))
|
|
429
|
-
reindex_map[ixb] = ixa
|
|
430
|
-
|
|
431
|
-
if edges_b:
|
|
432
|
-
raise ValueError("Not all edges matched.")
|
|
433
|
-
|
|
434
|
-
for si in neighbors_a:
|
|
435
|
-
ta, tb = tna[si], tnb[si]
|
|
436
|
-
|
|
437
|
-
# the local outer indices
|
|
438
|
-
sum_inds = [ix for ix in tb.inds if ix not in reindex_map]
|
|
439
|
-
|
|
440
|
-
tb = tb.reindex(reindex_map)
|
|
441
|
-
if negate:
|
|
442
|
-
tb.negate_()
|
|
443
|
-
# only need to negate a single tensor
|
|
444
|
-
negate = False
|
|
445
|
-
|
|
446
|
-
ta.direct_product_(tb, sum_inds)
|
|
447
|
-
|
|
448
|
-
if compress:
|
|
449
|
-
tna.compress(**compress_opts)
|
|
450
|
-
|
|
451
|
-
return tna
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
class TensorNetworkGen(TensorNetwork):
|
|
455
|
-
"""A tensor network which notionally has a single tensor per 'site',
|
|
456
|
-
though these could be labelled arbitrarily could also be linked in an
|
|
457
|
-
arbitrary geometry by bonds.
|
|
458
|
-
"""
|
|
459
|
-
|
|
460
|
-
_NDIMS = 1
|
|
461
|
-
_EXTRA_PROPS = (
|
|
462
|
-
"_sites",
|
|
463
|
-
"_site_tag_id",
|
|
464
|
-
)
|
|
465
|
-
|
|
466
|
-
def _compatible_arbgeom(self, other):
|
|
467
|
-
"""Check whether ``self`` and ``other`` represent the same set of
|
|
468
|
-
sites and are tagged equivalently.
|
|
469
|
-
"""
|
|
470
|
-
return isinstance(other, TensorNetworkGen) and all(
|
|
471
|
-
getattr(self, e, 0) == getattr(other, e, 1)
|
|
472
|
-
for e in TensorNetworkGen._EXTRA_PROPS
|
|
473
|
-
)
|
|
474
|
-
|
|
475
|
-
def combine(self, other, *, virtual=False, check_collisions=True):
|
|
476
|
-
"""Combine this tensor network with another, returning a new tensor
|
|
477
|
-
network. If the two are compatible, cast the resulting tensor network
|
|
478
|
-
to a :class:`TensorNetworkGen` instance.
|
|
479
|
-
|
|
480
|
-
Parameters
|
|
481
|
-
----------
|
|
482
|
-
other : TensorNetworkGen or TensorNetwork
|
|
483
|
-
The other tensor network to combine with.
|
|
484
|
-
virtual : bool, optional
|
|
485
|
-
Whether the new tensor network should copy all the incoming tensors
|
|
486
|
-
(``False``, the default), or view them as virtual (``True``).
|
|
487
|
-
check_collisions : bool, optional
|
|
488
|
-
Whether to check for index collisions between the two tensor
|
|
489
|
-
networks before combining them. If ``True`` (the default), any
|
|
490
|
-
inner indices that clash will be mangled.
|
|
491
|
-
|
|
492
|
-
Returns
|
|
493
|
-
-------
|
|
494
|
-
TensorNetworkGen or TensorNetwork
|
|
495
|
-
"""
|
|
496
|
-
new = super().combine(
|
|
497
|
-
other, virtual=virtual, check_collisions=check_collisions
|
|
498
|
-
)
|
|
499
|
-
if self._compatible_arbgeom(other):
|
|
500
|
-
new.view_as_(TensorNetworkGen, like=self)
|
|
501
|
-
return new
|
|
502
|
-
|
|
503
|
-
@property
|
|
504
|
-
def nsites(self):
|
|
505
|
-
"""The total number of sites."""
|
|
506
|
-
return len(self._sites)
|
|
507
|
-
|
|
508
|
-
def gen_site_coos(self):
|
|
509
|
-
"""Generate the coordinates of all sites, same as ``self.sites``."""
|
|
510
|
-
return self._sites
|
|
511
|
-
|
|
512
|
-
@property
|
|
513
|
-
def sites(self):
|
|
514
|
-
"""Tuple of the possible sites in this tensor network."""
|
|
515
|
-
sites = getattr(self, "_sites", None)
|
|
516
|
-
if sites is None:
|
|
517
|
-
sites = tuple(self.gen_site_coos())
|
|
518
|
-
return sites
|
|
519
|
-
|
|
520
|
-
def _get_site_set(self):
|
|
521
|
-
"""The set of all sites."""
|
|
522
|
-
if getattr(self, "_site_set", None) is None:
|
|
523
|
-
self._site_set = set(self.sites)
|
|
524
|
-
return self._site_set
|
|
525
|
-
|
|
526
|
-
def gen_sites_present(self):
|
|
527
|
-
"""Generate the sites which are currently present (e.g. if a local view
|
|
528
|
-
of a larger tensor network), based on whether their tags are present.
|
|
529
|
-
|
|
530
|
-
Examples
|
|
531
|
-
--------
|
|
532
|
-
|
|
533
|
-
>>> tn = qtn.TN3D_rand(4, 4, 4, 2)
|
|
534
|
-
>>> tn_sub = tn.select_local('I1,2,3', max_distance=1)
|
|
535
|
-
>>> list(tn_sub.gen_sites_present())
|
|
536
|
-
[(0, 2, 3), (1, 1, 3), (1, 2, 2), (1, 2, 3), (1, 3, 3), (2, 2, 3)]
|
|
537
|
-
|
|
538
|
-
"""
|
|
539
|
-
return (
|
|
540
|
-
site
|
|
541
|
-
for site in self.gen_site_coos()
|
|
542
|
-
if self.site_tag(site) in self.tag_map
|
|
543
|
-
)
|
|
544
|
-
|
|
545
|
-
@property
|
|
546
|
-
def site_tag_id(self):
|
|
547
|
-
"""The string specifier for tagging each site of this tensor network."""
|
|
548
|
-
return self._site_tag_id
|
|
549
|
-
|
|
550
|
-
def site_tag(self, site):
|
|
551
|
-
"""The name of the tag specifiying the tensor at ``site``."""
|
|
552
|
-
return self.site_tag_id.format(site)
|
|
553
|
-
|
|
554
|
-
def retag_sites(self, new_id, where=None, inplace=False):
|
|
555
|
-
"""Modify the site tags for all or some tensors in this tensor network
|
|
556
|
-
(without changing the ``site_tag_id``).
|
|
557
|
-
|
|
558
|
-
Parameters
|
|
559
|
-
----------
|
|
560
|
-
new_id : str
|
|
561
|
-
A string with a format placeholder to accept a site, e.g. "S{}".
|
|
562
|
-
where : None or sequence
|
|
563
|
-
Which sites to update the index labels on. If ``None`` (default)
|
|
564
|
-
all sites.
|
|
565
|
-
inplace : bool
|
|
566
|
-
Whether to retag in place.
|
|
567
|
-
"""
|
|
568
|
-
if where is None:
|
|
569
|
-
where = self.gen_sites_present()
|
|
570
|
-
|
|
571
|
-
return self.retag(
|
|
572
|
-
{self.site_tag(x): new_id.format(x) for x in where},
|
|
573
|
-
inplace=inplace,
|
|
574
|
-
)
|
|
575
|
-
|
|
576
|
-
@property
|
|
577
|
-
def site_tags(self):
|
|
578
|
-
"""All of the site tags."""
|
|
579
|
-
if getattr(self, "_site_tags", None) is None:
|
|
580
|
-
self._site_tags = tuple(map(self.site_tag, self.gen_site_coos()))
|
|
581
|
-
return self._site_tags
|
|
582
|
-
|
|
583
|
-
@property
|
|
584
|
-
def site_tags_present(self):
|
|
585
|
-
"""All of the site tags still present in this tensor network."""
|
|
586
|
-
return tuple(map(self.site_tag, self.gen_sites_present()))
|
|
587
|
-
|
|
588
|
-
@site_tag_id.setter
|
|
589
|
-
def site_tag_id(self, new_id):
|
|
590
|
-
if self._site_tag_id != new_id:
|
|
591
|
-
self.retag_sites(new_id, inplace=True)
|
|
592
|
-
self._site_tag_id = new_id
|
|
593
|
-
self._site_tags = None
|
|
594
|
-
|
|
595
|
-
def retag_all(self, new_id, inplace=False):
|
|
596
|
-
"""Retag all sites and change the ``site_tag_id``."""
|
|
597
|
-
tn = self if inplace else self.copy()
|
|
598
|
-
tn.site_tag_id = new_id
|
|
599
|
-
return tn
|
|
600
|
-
|
|
601
|
-
retag_all_ = functools.partialmethod(retag_all, inplace=True)
|
|
602
|
-
|
|
603
|
-
def _get_site_tag_set(self):
|
|
604
|
-
"""The oset of all site tags."""
|
|
605
|
-
if getattr(self, "_site_tag_set", None) is None:
|
|
606
|
-
self._site_tag_set = set(self.site_tags)
|
|
607
|
-
return self._site_tag_set
|
|
608
|
-
|
|
609
|
-
def filter_valid_site_tags(self, tags):
|
|
610
|
-
"""Get the valid site tags from ``tags``."""
|
|
611
|
-
return oset(sorted(self._get_site_tag_set().intersection(tags)))
|
|
612
|
-
|
|
613
|
-
def maybe_convert_coo(self, x):
|
|
614
|
-
"""Check if ``x`` is a valid site and convert to the corresponding site
|
|
615
|
-
tag if so, else return ``x``.
|
|
616
|
-
"""
|
|
617
|
-
try:
|
|
618
|
-
if x in self._get_site_set():
|
|
619
|
-
return self.site_tag(x)
|
|
620
|
-
except TypeError:
|
|
621
|
-
pass
|
|
622
|
-
return x
|
|
623
|
-
|
|
624
|
-
def gen_tags_from_coos(self, coos):
|
|
625
|
-
"""Generate the site tags corresponding to the given coordinates."""
|
|
626
|
-
return map(self.site_tag, coos)
|
|
627
|
-
|
|
628
|
-
def _get_tids_from_tags(self, tags, which="all"):
|
|
629
|
-
"""This is the function that lets coordinates such as ``site`` be
|
|
630
|
-
used for many 'tag' based functions.
|
|
631
|
-
"""
|
|
632
|
-
tags = self.maybe_convert_coo(tags)
|
|
633
|
-
return super()._get_tids_from_tags(tags, which=which)
|
|
634
|
-
|
|
635
|
-
def _get_tid_to_site_map(self):
|
|
636
|
-
"""Get a mapping from low level tensor id to the site it represents,
|
|
637
|
-
assuming there is a single tensor per site.
|
|
638
|
-
"""
|
|
639
|
-
tid2site = {}
|
|
640
|
-
for site in self.sites:
|
|
641
|
-
(tid,) = self._get_tids_from_tags(site)
|
|
642
|
-
tid2site[tid] = site
|
|
643
|
-
return tid2site
|
|
644
|
-
|
|
645
|
-
def gen_bond_coos(self):
|
|
646
|
-
"""Generate the coordinates (pairs of sites) of all bonds."""
|
|
647
|
-
tid2site = self._get_tid_to_site_map()
|
|
648
|
-
seen = set()
|
|
649
|
-
for tida in self.tensor_map:
|
|
650
|
-
for tidb in self._get_neighbor_tids(tida):
|
|
651
|
-
sitea, siteb = tid2site[tida], tid2site[tidb]
|
|
652
|
-
if sitea > siteb:
|
|
653
|
-
sitea, siteb = siteb, sitea
|
|
654
|
-
bond = (sitea, siteb)
|
|
655
|
-
if bond not in seen:
|
|
656
|
-
yield bond
|
|
657
|
-
seen.add(bond)
|
|
658
|
-
|
|
659
|
-
def get_site_neighbor_map(self):
|
|
660
|
-
"""Get a mapping from each site to its neighbors."""
|
|
661
|
-
tid2site = self._get_tid_to_site_map()
|
|
662
|
-
return {
|
|
663
|
-
tid2site[tid]: tuple(
|
|
664
|
-
tid2site[ntid] for ntid in self._get_neighbor_tids(tid)
|
|
665
|
-
)
|
|
666
|
-
for tid in self.tensor_map
|
|
667
|
-
}
|
|
668
|
-
|
|
669
|
-
def gen_regions_sites(self, max_region_size=None, sites=None):
|
|
670
|
-
"""Generate sets of sites that represent 'regions' where every node is
|
|
671
|
-
connected to at least two other region nodes. This is a simple wrapper
|
|
672
|
-
around ``TensorNewtork.gen_regions`` that works with the sites
|
|
673
|
-
rather than ``tids``.
|
|
674
|
-
|
|
675
|
-
Parameters
|
|
676
|
-
----------
|
|
677
|
-
max_region_size : None or int
|
|
678
|
-
Set the maximum number of tensors that can appear in a region. If
|
|
679
|
-
``None``, wait until any valid region is found and set that as the
|
|
680
|
-
maximum size.
|
|
681
|
-
tags : None or sequence of str
|
|
682
|
-
If supplied, only consider regions containing these tids.
|
|
683
|
-
|
|
684
|
-
Yields
|
|
685
|
-
------
|
|
686
|
-
tuple[hashable]
|
|
687
|
-
"""
|
|
688
|
-
if sites is not None:
|
|
689
|
-
tags = tuple(map(self.site_tag, sites))
|
|
690
|
-
tids = self._get_tids_from_tags(tags, "any")
|
|
691
|
-
else:
|
|
692
|
-
tids = None
|
|
693
|
-
|
|
694
|
-
tid2site = self._get_tid_to_site_map()
|
|
695
|
-
|
|
696
|
-
for region in self.gen_regions(
|
|
697
|
-
max_region_size=max_region_size, tids=tids
|
|
698
|
-
):
|
|
699
|
-
yield tuple(tid2site[tid] for tid in region)
|
|
700
|
-
|
|
701
|
-
def reset_cached_properties(self):
|
|
702
|
-
"""Reset any cached properties, one should call this when changing the
|
|
703
|
-
actual geometry of a TN inplace, for example.
|
|
704
|
-
"""
|
|
705
|
-
self._site_set = None
|
|
706
|
-
self._site_tag_set = None
|
|
707
|
-
self._site_tags = None
|
|
708
|
-
|
|
709
|
-
@functools.wraps(tensor_network_align)
|
|
710
|
-
def align(self, *args, inplace=False, **kwargs):
|
|
711
|
-
return tensor_network_align(self, *args, inplace=inplace, **kwargs)
|
|
712
|
-
|
|
713
|
-
align_ = functools.partialmethod(align, inplace=True)
|
|
714
|
-
|
|
715
|
-
def __add__(self, other):
|
|
716
|
-
return tensor_network_ag_sum(self, other)
|
|
717
|
-
|
|
718
|
-
def __sub__(self, other):
|
|
719
|
-
return tensor_network_ag_sum(self, other, negate=True)
|
|
720
|
-
|
|
721
|
-
def __iadd__(self, other):
|
|
722
|
-
return tensor_network_ag_sum(self, other, inplace=True)
|
|
723
|
-
|
|
724
|
-
def __isub__(self, other):
|
|
725
|
-
return tensor_network_ag_sum(self, other, negate=True, inplace=True)
|
|
726
|
-
|
|
727
|
-
def normalize_simple(self, gauges, **contract_opts):
|
|
728
|
-
"""Normalize this network using simple local gauges. After calling
|
|
729
|
-
this, any tree-like sub network gauged with ``gauges`` will have
|
|
730
|
-
2-norm 1. Inplace operation on both the tensor network and ``gauges``.
|
|
731
|
-
|
|
732
|
-
Parameters
|
|
733
|
-
----------
|
|
734
|
-
gauges : dict[str, array_like]
|
|
735
|
-
The gauges to normalize with.
|
|
736
|
-
"""
|
|
737
|
-
# normalize gauges
|
|
738
|
-
for ix, g in gauges.items():
|
|
739
|
-
gauges[ix] = g / do("linalg.norm", g)
|
|
740
|
-
|
|
741
|
-
nfactor = 1.0
|
|
742
|
-
|
|
743
|
-
# normalize sites
|
|
744
|
-
for site in self.sites:
|
|
745
|
-
tn_site = self.select(site)
|
|
746
|
-
tn_site_gauged = tn_site.copy()
|
|
747
|
-
tn_site_gauged.gauge_simple_insert(gauges)
|
|
748
|
-
lnorm = (tn_site_gauged.H | tn_site_gauged).contract(
|
|
749
|
-
all, **contract_opts
|
|
750
|
-
) ** 0.5
|
|
751
|
-
tn_site /= lnorm
|
|
752
|
-
nfactor *= lnorm
|
|
753
|
-
|
|
754
|
-
return nfactor
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
def gauge_product_boundary_vector(
|
|
758
|
-
tn,
|
|
759
|
-
tags,
|
|
760
|
-
which="all",
|
|
761
|
-
max_bond=1,
|
|
762
|
-
smudge=1e-6,
|
|
763
|
-
canonize_distance=0,
|
|
764
|
-
select_local_distance=None,
|
|
765
|
-
select_local_opts=None,
|
|
766
|
-
**contract_around_opts,
|
|
767
|
-
):
|
|
768
|
-
tids = tn._get_tids_from_tags(tags, which)
|
|
769
|
-
|
|
770
|
-
# form the double layer tensor network - this is the TN we will
|
|
771
|
-
# generate the actual gauges with
|
|
772
|
-
if select_local_distance is None:
|
|
773
|
-
# use the whole tensor network ...
|
|
774
|
-
outer_inds = tn.outer_inds()
|
|
775
|
-
dtn = tn.H & tn
|
|
776
|
-
else:
|
|
777
|
-
# ... or just a local patch
|
|
778
|
-
select_local_opts = ensure_dict(select_local_opts)
|
|
779
|
-
ltn = tn._select_local_tids(
|
|
780
|
-
tids,
|
|
781
|
-
max_distance=select_local_distance,
|
|
782
|
-
virtual=False,
|
|
783
|
-
**select_local_opts,
|
|
784
|
-
)
|
|
785
|
-
outer_inds = ltn.outer_inds()
|
|
786
|
-
dtn = ltn.H | ltn
|
|
787
|
-
|
|
788
|
-
# get all inds in the tagged region
|
|
789
|
-
region_inds = set.union(*(set(tn.tensor_map[tid].inds) for tid in tids))
|
|
790
|
-
|
|
791
|
-
# contract all 'physical' indices so that we have a single layer TN
|
|
792
|
-
# outside region and double layer sandwich inside region
|
|
793
|
-
for ix in outer_inds:
|
|
794
|
-
if (ix in region_inds) or (ix not in dtn.ind_map):
|
|
795
|
-
# 1st condition - don't contract region sandwich
|
|
796
|
-
# 2nd condition - if local selecting, will get multibonds so
|
|
797
|
-
# some indices already contracted
|
|
798
|
-
continue
|
|
799
|
-
dtn.contract_ind(ix)
|
|
800
|
-
|
|
801
|
-
# form the single layer boundary of double layer tagged region
|
|
802
|
-
dtids = dtn._get_tids_from_tags(tags, which)
|
|
803
|
-
dtn._contract_around_tids(
|
|
804
|
-
dtids,
|
|
805
|
-
min_distance=1,
|
|
806
|
-
max_bond=max_bond,
|
|
807
|
-
canonize_distance=canonize_distance,
|
|
808
|
-
**contract_around_opts,
|
|
809
|
-
)
|
|
810
|
-
|
|
811
|
-
# select this boundary and compress to ensure it is a product operator
|
|
812
|
-
dtn = dtn._select_without_tids(dtids, virtual=True)
|
|
813
|
-
dtn.compress_all_(max_bond=1)
|
|
814
|
-
dtn.squeeze_()
|
|
815
|
-
|
|
816
|
-
# each tensor in the boundary should now have exactly two inds
|
|
817
|
-
# connecting to the top and bottom of the tagged region double
|
|
818
|
-
# layer. Iterate over these, inserting the gauge into the original
|
|
819
|
-
# tensor network that would turn each of these boundary tensors
|
|
820
|
-
# into identities.
|
|
821
|
-
for t in dtn:
|
|
822
|
-
(ix,) = [i for i in t.inds if i in region_inds]
|
|
823
|
-
_, s, VH = do("linalg.svd", t.data)
|
|
824
|
-
s = s + smudge
|
|
825
|
-
G = do("reshape", s**0.5, (-1, 1)) * VH
|
|
826
|
-
Ginv = dag(VH) * do("reshape", s**-0.5, (1, -1))
|
|
827
|
-
|
|
828
|
-
tid_l, tid_r = sorted(tn.ind_map[ix], key=lambda tid: tid in tids)
|
|
829
|
-
tn.tensor_map[tid_l].gate_(Ginv.T, ix)
|
|
830
|
-
tn.tensor_map[tid_r].gate_(G, ix)
|
|
831
|
-
|
|
832
|
-
return tn
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
def region_remove_dangling(sites, neighbors, where=()):
|
|
836
|
-
sites = list(sites)
|
|
837
|
-
i = 0
|
|
838
|
-
while i < len(sites):
|
|
839
|
-
# check next site
|
|
840
|
-
site = sites[i]
|
|
841
|
-
# can only reduce non target sites
|
|
842
|
-
if site not in where:
|
|
843
|
-
num_neighbors = sum(nsite in sites for nsite in neighbors[site])
|
|
844
|
-
if num_neighbors < 2:
|
|
845
|
-
# dangling -> remove!
|
|
846
|
-
sites.pop(i)
|
|
847
|
-
# back to beginning
|
|
848
|
-
i = -1
|
|
849
|
-
i += 1
|
|
850
|
-
return frozenset(sites)
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
_VALID_GATE_PROPAGATE = {"sites", "register", False, True}
|
|
854
|
-
_LAZY_GATE_CONTRACT = {
|
|
855
|
-
False,
|
|
856
|
-
"split-gate",
|
|
857
|
-
"swap-split-gate",
|
|
858
|
-
"auto-split-gate",
|
|
859
|
-
}
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
class TensorNetworkGenVector(TensorNetworkGen):
|
|
863
|
-
"""A tensor network which notionally has a single tensor and outer index
|
|
864
|
-
per 'site', though these could be labelled arbitrarily and could also be
|
|
865
|
-
linked in an arbitrary geometry by bonds.
|
|
866
|
-
"""
|
|
867
|
-
|
|
868
|
-
_EXTRA_PROPS = (
|
|
869
|
-
"_sites",
|
|
870
|
-
"_site_tag_id",
|
|
871
|
-
"_site_ind_id",
|
|
872
|
-
)
|
|
873
|
-
|
|
874
|
-
@property
|
|
875
|
-
def site_ind_id(self):
|
|
876
|
-
"""The string specifier for the physical indices."""
|
|
877
|
-
return self._site_ind_id
|
|
878
|
-
|
|
879
|
-
def site_ind(self, site):
|
|
880
|
-
return self.site_ind_id.format(site)
|
|
881
|
-
|
|
882
|
-
@property
|
|
883
|
-
def site_inds(self):
|
|
884
|
-
"""Return a tuple of all site indices."""
|
|
885
|
-
if getattr(self, "_site_inds", None) is None:
|
|
886
|
-
self._site_inds = tuple(map(self.site_ind, self.gen_site_coos()))
|
|
887
|
-
return self._site_inds
|
|
888
|
-
|
|
889
|
-
@property
|
|
890
|
-
def site_inds_present(self):
|
|
891
|
-
"""All of the site inds still present in this tensor network."""
|
|
892
|
-
return tuple(map(self.site_ind, self.gen_sites_present()))
|
|
893
|
-
|
|
894
|
-
def reset_cached_properties(self):
|
|
895
|
-
"""Reset any cached properties, one should call this when changing the
|
|
896
|
-
actual geometry of a TN inplace, for example.
|
|
897
|
-
"""
|
|
898
|
-
self._site_inds = None
|
|
899
|
-
return super().reset_cached_properties()
|
|
900
|
-
|
|
901
|
-
def reindex_sites(self, new_id, where=None, inplace=False):
|
|
902
|
-
"""Modify the site indices for all or some tensors in this vector
|
|
903
|
-
tensor network (without changing the ``site_ind_id``).
|
|
904
|
-
|
|
905
|
-
Parameters
|
|
906
|
-
----------
|
|
907
|
-
new_id : str
|
|
908
|
-
A string with a format placeholder to accept a site, e.g. "ket{}".
|
|
909
|
-
where : None or sequence
|
|
910
|
-
Which sites to update the index labels on. If ``None`` (default)
|
|
911
|
-
all sites.
|
|
912
|
-
inplace : bool
|
|
913
|
-
Whether to reindex in place.
|
|
914
|
-
"""
|
|
915
|
-
if where is None:
|
|
916
|
-
where = self.gen_sites_present()
|
|
917
|
-
|
|
918
|
-
return self.reindex(
|
|
919
|
-
{self.site_ind(x): new_id.format(x) for x in where},
|
|
920
|
-
inplace=inplace,
|
|
921
|
-
)
|
|
922
|
-
|
|
923
|
-
reindex_sites_ = functools.partialmethod(reindex_sites, inplace=True)
|
|
924
|
-
|
|
925
|
-
@site_ind_id.setter
|
|
926
|
-
def site_ind_id(self, new_id):
|
|
927
|
-
if self._site_ind_id != new_id:
|
|
928
|
-
self.reindex_sites_(new_id)
|
|
929
|
-
self._site_ind_id = new_id
|
|
930
|
-
self._site_inds = None
|
|
931
|
-
|
|
932
|
-
def reindex_all(self, new_id, inplace=False):
|
|
933
|
-
"""Reindex all physical sites and change the ``site_ind_id``."""
|
|
934
|
-
tn = self if inplace else self.copy()
|
|
935
|
-
tn.site_ind_id = new_id
|
|
936
|
-
return tn
|
|
937
|
-
|
|
938
|
-
reindex_all_ = functools.partialmethod(reindex_all, inplace=True)
|
|
939
|
-
|
|
940
|
-
def gen_inds_from_coos(self, coos):
|
|
941
|
-
"""Generate the site inds corresponding to the given coordinates."""
|
|
942
|
-
return map(self.site_ind, coos)
|
|
943
|
-
|
|
944
|
-
def phys_dim(self, site=None):
|
|
945
|
-
"""Get the physical dimension of ``site``, defaulting to the first site
|
|
946
|
-
if not specified.
|
|
947
|
-
"""
|
|
948
|
-
if site is None:
|
|
949
|
-
site = next(iter(self.gen_sites_present()))
|
|
950
|
-
return self.ind_size(self.site_ind(site))
|
|
951
|
-
|
|
952
|
-
def to_dense(
|
|
953
|
-
self, *inds_seq, to_qarray=False, to_ket=None, **contract_opts
|
|
954
|
-
):
|
|
955
|
-
"""Contract this tensor network 'vector' into a dense array. By
|
|
956
|
-
default, turn into a 'ket' ``qarray``, i.e. column vector of shape
|
|
957
|
-
``(d, 1)``.
|
|
958
|
-
|
|
959
|
-
Parameters
|
|
960
|
-
----------
|
|
961
|
-
inds_seq : sequence of sequences of str
|
|
962
|
-
How to group the site indices into the dense array. By default,
|
|
963
|
-
use a single group ordered like ``sites``, but only containing
|
|
964
|
-
those sites which are still present.
|
|
965
|
-
to_qarray : bool
|
|
966
|
-
Whether to turn the dense array into a ``qarray``, if the backend
|
|
967
|
-
would otherwise be ``'numpy'``.
|
|
968
|
-
to_ket : None or str
|
|
969
|
-
Whether to reshape the dense array into a ket (shape ``(d, 1)``
|
|
970
|
-
array). If ``None`` (default), do this only if the ``inds_seq`` is
|
|
971
|
-
not supplied.
|
|
972
|
-
contract_opts
|
|
973
|
-
Options to pass to
|
|
974
|
-
:meth:`~quimb.tensor.tensor_core.TensorNewtork.contract`.
|
|
975
|
-
|
|
976
|
-
Returns
|
|
977
|
-
-------
|
|
978
|
-
array
|
|
979
|
-
"""
|
|
980
|
-
if not inds_seq:
|
|
981
|
-
inds_seq = (self.site_inds_present,)
|
|
982
|
-
if to_ket is None:
|
|
983
|
-
to_ket = True
|
|
984
|
-
|
|
985
|
-
x = TensorNetwork.to_dense(
|
|
986
|
-
self, *inds_seq, to_qarray=to_qarray, **contract_opts
|
|
987
|
-
)
|
|
988
|
-
|
|
989
|
-
if to_ket:
|
|
990
|
-
x = do("reshape", x, (-1, 1))
|
|
991
|
-
|
|
992
|
-
return x
|
|
993
|
-
|
|
994
|
-
to_qarray = functools.partialmethod(to_dense, to_qarray=True)
|
|
995
|
-
|
|
996
|
-
def gate_with_op_lazy(self, A, transpose=False, inplace=False, **kwargs):
|
|
997
|
-
r"""Act lazily with the operator tensor network ``A``, which should
|
|
998
|
-
have matching structure, on this vector/state tensor network, like
|
|
999
|
-
``A @ x``. The returned tensor network will have the same structure as
|
|
1000
|
-
this one, but with the operator gated in lazily, i.e. uncontracted.
|
|
1001
|
-
|
|
1002
|
-
.. math::
|
|
1003
|
-
|
|
1004
|
-
| x \rangle \rightarrow A | x \rangle
|
|
1005
|
-
|
|
1006
|
-
or (if ``transpose=True``):
|
|
1007
|
-
|
|
1008
|
-
.. math::
|
|
1009
|
-
|
|
1010
|
-
| x \rangle \rightarrow A^T | x \rangle
|
|
1011
|
-
|
|
1012
|
-
Parameters
|
|
1013
|
-
----------
|
|
1014
|
-
A : TensorNetworkGenOperator
|
|
1015
|
-
The operator tensor network to gate with, or apply to this tensor
|
|
1016
|
-
network.
|
|
1017
|
-
transpose : bool, optional
|
|
1018
|
-
Whether to contract the lower or upper indices of ``A`` with the
|
|
1019
|
-
site indices of ``x``. If ``False`` (the default), the lower
|
|
1020
|
-
indices of ``A`` will be contracted with the site indices of ``x``,
|
|
1021
|
-
if ``True`` the upper indices of ``A`` will be contracted with
|
|
1022
|
-
the site indices of ``x``, which is like applying ``A.T @ x``.
|
|
1023
|
-
inplace : bool, optional
|
|
1024
|
-
Whether to perform the gate operation inplace on this tensor
|
|
1025
|
-
network.
|
|
1026
|
-
|
|
1027
|
-
Returns
|
|
1028
|
-
-------
|
|
1029
|
-
TensorNetworkGenVector
|
|
1030
|
-
"""
|
|
1031
|
-
return tensor_network_apply_op_vec(
|
|
1032
|
-
A=A,
|
|
1033
|
-
x=self,
|
|
1034
|
-
which_A="upper" if transpose else "lower",
|
|
1035
|
-
contract=False,
|
|
1036
|
-
inplace=inplace,
|
|
1037
|
-
**kwargs,
|
|
1038
|
-
)
|
|
1039
|
-
|
|
1040
|
-
gate_with_op_lazy_ = functools.partialmethod(
|
|
1041
|
-
gate_with_op_lazy, inplace=True
|
|
1042
|
-
)
|
|
1043
|
-
|
|
1044
|
-
def gate(
|
|
1045
|
-
self,
|
|
1046
|
-
G,
|
|
1047
|
-
where,
|
|
1048
|
-
contract=False,
|
|
1049
|
-
tags=None,
|
|
1050
|
-
propagate_tags=False,
|
|
1051
|
-
info=None,
|
|
1052
|
-
inplace=False,
|
|
1053
|
-
**compress_opts,
|
|
1054
|
-
):
|
|
1055
|
-
r"""Apply a gate to this vector tensor network at sites ``where``. This
|
|
1056
|
-
is essentially a wrapper around
|
|
1057
|
-
:meth:`~quimb.tensor.tensor_core.TensorNetwork.gate_inds` apart from
|
|
1058
|
-
``where`` can be specified as a list of sites, and tags can be
|
|
1059
|
-
optionally, intelligently propagated to the new gate tensor.
|
|
1060
|
-
|
|
1061
|
-
.. math::
|
|
1062
|
-
|
|
1063
|
-
| \psi \rangle \rightarrow G_\mathrm{where} | \psi \rangle
|
|
1064
|
-
|
|
1065
|
-
Parameters
|
|
1066
|
-
----------
|
|
1067
|
-
G : array_ike
|
|
1068
|
-
The gate array to apply, should match or be factorable into the
|
|
1069
|
-
shape ``(*phys_dims, *phys_dims)``.
|
|
1070
|
-
where : node or sequence[node]
|
|
1071
|
-
The sites to apply the gate to.
|
|
1072
|
-
contract : {False, True, 'split', 'reduce-split', 'split-gate',
|
|
1073
|
-
'swap-split-gate', 'auto-split-gate'}, optional
|
|
1074
|
-
How to apply the gate, see
|
|
1075
|
-
:meth:`~quimb.tensor.tensor_core.TensorNetwork.gate_inds`.
|
|
1076
|
-
tags : str or sequence of str, optional
|
|
1077
|
-
Tags to add to the new gate tensor.
|
|
1078
|
-
propagate_tags : {False, True, 'register', 'sites'}, optional
|
|
1079
|
-
Whether to propagate tags to the new gate tensor::
|
|
1080
|
-
|
|
1081
|
-
- False: no tags are propagated
|
|
1082
|
-
- True: all tags are propagated
|
|
1083
|
-
- 'register': only site tags corresponding to ``where`` are
|
|
1084
|
-
added.
|
|
1085
|
-
- 'sites': all site tags on the current sites are propgated,
|
|
1086
|
-
resulting in a lightcone like tagging.
|
|
1087
|
-
|
|
1088
|
-
info : None or dict, optional
|
|
1089
|
-
Used to store extra optional information such as the singular
|
|
1090
|
-
values if not absorbed.
|
|
1091
|
-
inplace : bool, optional
|
|
1092
|
-
Whether to perform the gate operation inplace on the tensor network
|
|
1093
|
-
or not.
|
|
1094
|
-
compress_opts
|
|
1095
|
-
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split` for any
|
|
1096
|
-
``contract`` methods that involve splitting. Ignored otherwise.
|
|
1097
|
-
|
|
1098
|
-
Returns
|
|
1099
|
-
-------
|
|
1100
|
-
TensorNetworkGenVector
|
|
1101
|
-
|
|
1102
|
-
See Also
|
|
1103
|
-
--------
|
|
1104
|
-
TensorNetwork.gate_inds
|
|
1105
|
-
"""
|
|
1106
|
-
check_opt("propagate_tags", propagate_tags, _VALID_GATE_PROPAGATE)
|
|
1107
|
-
|
|
1108
|
-
tn = self if inplace else self.copy()
|
|
1109
|
-
|
|
1110
|
-
if not isinstance(where, (tuple, list)):
|
|
1111
|
-
where = (where,)
|
|
1112
|
-
inds = tuple(map(tn.site_ind, where))
|
|
1113
|
-
|
|
1114
|
-
# potentially add tags from current tensors to the new ones,
|
|
1115
|
-
# only do this if we are lazily adding the gate tensor(s)
|
|
1116
|
-
if (contract in _LAZY_GATE_CONTRACT) and (
|
|
1117
|
-
propagate_tags in (True, "sites")
|
|
1118
|
-
):
|
|
1119
|
-
old_tags = oset.union(*(t.tags for t in tn._inds_get(*inds)))
|
|
1120
|
-
if propagate_tags == "sites":
|
|
1121
|
-
old_tags = tn.filter_valid_site_tags(old_tags)
|
|
1122
|
-
|
|
1123
|
-
tags = tags_to_oset(tags)
|
|
1124
|
-
tags.update(old_tags)
|
|
1125
|
-
|
|
1126
|
-
# perform the actual gating
|
|
1127
|
-
tn.gate_inds_(
|
|
1128
|
-
G, inds, contract=contract, tags=tags, info=info, **compress_opts
|
|
1129
|
-
)
|
|
1130
|
-
|
|
1131
|
-
# possibly add tags based on where the gate was applied
|
|
1132
|
-
if propagate_tags == "register":
|
|
1133
|
-
for ix, site in zip(inds, where):
|
|
1134
|
-
(t,) = tn._inds_get(ix)
|
|
1135
|
-
t.add_tag(tn.site_tag(site))
|
|
1136
|
-
|
|
1137
|
-
return tn
|
|
1138
|
-
|
|
1139
|
-
gate_ = functools.partialmethod(gate, inplace=True)
|
|
1140
|
-
|
|
1141
|
-
def gate_simple_(
|
|
1142
|
-
self,
|
|
1143
|
-
G,
|
|
1144
|
-
where,
|
|
1145
|
-
gauges,
|
|
1146
|
-
renorm=True,
|
|
1147
|
-
smudge=1e-12,
|
|
1148
|
-
power=1.0,
|
|
1149
|
-
**gate_opts,
|
|
1150
|
-
):
|
|
1151
|
-
"""Apply a gate to this vector tensor network at sites ``where``, using
|
|
1152
|
-
simple update style gauging of the tensors first, as supplied in
|
|
1153
|
-
``gauges``. The new singular values for the bond are reinserted into
|
|
1154
|
-
``gauges``.
|
|
1155
|
-
|
|
1156
|
-
Parameters
|
|
1157
|
-
----------
|
|
1158
|
-
G : array_like
|
|
1159
|
-
The gate to be applied.
|
|
1160
|
-
where : node or sequence[node]
|
|
1161
|
-
The sites to apply the gate to.
|
|
1162
|
-
gauges : dict[str, array_like]
|
|
1163
|
-
The store of gauge bonds, the keys being indices and the values
|
|
1164
|
-
being the vectors. Only bonds present in this dictionary will be
|
|
1165
|
-
used.
|
|
1166
|
-
renorm : bool, optional
|
|
1167
|
-
Whether to renormalise the singular after the gate is applied,
|
|
1168
|
-
before reinserting them into ``gauges``.
|
|
1169
|
-
smudge : float, optional
|
|
1170
|
-
A small value to add to the gauges before multiplying them in and
|
|
1171
|
-
inverting them to avoid numerical issues.
|
|
1172
|
-
power : float, optional
|
|
1173
|
-
The power to raise the singular values to before multiplying them
|
|
1174
|
-
in and inverting them.
|
|
1175
|
-
gate_opts
|
|
1176
|
-
Supplied to
|
|
1177
|
-
:meth:`~quimb.tensor.tensor_core.TensorNetwork.gate_inds`.
|
|
1178
|
-
"""
|
|
1179
|
-
if isinstance(where, int):
|
|
1180
|
-
where = (where,)
|
|
1181
|
-
|
|
1182
|
-
site_tags = tuple(map(self.site_tag, where))
|
|
1183
|
-
tids = self._get_tids_from_tags(site_tags, "any")
|
|
1184
|
-
|
|
1185
|
-
if len(tids) == 1:
|
|
1186
|
-
# gate acts on a single tensor
|
|
1187
|
-
return self.gate_(G, where, contract=True)
|
|
1188
|
-
|
|
1189
|
-
gate_opts.setdefault("absorb", None)
|
|
1190
|
-
gate_opts.setdefault("contract", "reduce-split")
|
|
1191
|
-
tn_where = self._select_tids(tids)
|
|
1192
|
-
|
|
1193
|
-
with tn_where.gauge_simple_temp(
|
|
1194
|
-
gauges,
|
|
1195
|
-
smudge=smudge,
|
|
1196
|
-
power=power,
|
|
1197
|
-
ungauge_inner=False,
|
|
1198
|
-
):
|
|
1199
|
-
info = {}
|
|
1200
|
-
tn_where.gate_(G, where, info=info, **gate_opts)
|
|
1201
|
-
|
|
1202
|
-
# inner ungauging is performed by tracking the new singular values
|
|
1203
|
-
(((_, ix), s),) = info.items()
|
|
1204
|
-
if renorm:
|
|
1205
|
-
s = s / do("linalg.norm", s)
|
|
1206
|
-
gauges[ix] = s
|
|
1207
|
-
|
|
1208
|
-
return self

    def gate_fit_local_(
        self,
        G,
        where,
        max_distance=0,
        fillin=0,
        gauges=None,
        **fit_opts,
    ):
        # select a local neighborhood of tensors
        tids = self._get_tids_from_tags(
            tuple(map(self.site_tag, where)), "any"
        )
        if len(tids) == 2:
            tids = self.get_path_between_tids(*tids).tids

        k = self._select_local_tids(
            tids,
            max_distance=max_distance,
            fillin=fillin,
            virtual=True,
        )

        if gauges:
            outer, inner = k.gauge_simple_insert(gauges)

        Gk = k.gate(G, where)
        k.fit_(Gk, **fit_opts)

        if gauges:
            k.gauge_simple_remove(outer, inner)

        if gauges is not None:
            k.gauge_all_simple_(gauges=gauges)

    def make_reduced_density_matrix(
        self,
        where,
        allow_dangling=True,
        bra_ind_id="b{}",
        mangle_append="*",
        layer_tags=("KET", "BRA"),
    ):
        """Form the tensor network representation of the reduced density
        matrix, taking special care to handle potential hyper inner and outer
        indices.

        Parameters
        ----------
        where : node or sequence[node]
            The sites to keep.
        allow_dangling : bool, optional
            Whether to allow dangling indices in the resulting density matrix.
            These are non-physical indices, that usually result from having
            cut a region of the tensor network.
        bra_ind_id : str, optional
            The string format to use for the bra indices.
        mangle_append : str, optional
            The string to append to indices that are not traced out.
        layer_tags : tuple of str, optional
            The tags to apply to the ket and bra tensor network layers.
        """
        where = set(where)
        reindex_map = {}
        phys_inds = set()

        for coo in self.gen_site_coos():
            kix = self.site_ind(coo)
            if coo in where:
                reindex_map[kix] = bra_ind_id.format(coo)
            phys_inds.add(kix)

        for ix, tids in self.ind_map.items():
            if ix in phys_inds:
                # traced out or handled above
                continue

            if (len(tids) == 1) and allow_dangling:
                # dangling indices appear most often in cluster methods
                continue

            reindex_map[ix] = ix + mangle_append

        ket = self.copy()
        bra = self.reindex(reindex_map).conj_()

        if layer_tags:
            ket.add_tag(layer_tags[0])
            bra.add_tag(layer_tags[1])

        # index collisions already handled above
        return ket.combine(bra, virtual=True, check_collisions=False)
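
    # Illustrative usage sketch (added commentary, not part of the removed
    # quimb source): forming the reduced-density-matrix tensor network for
    # two sites of a small random PEPS. ``PEPS.rand`` and ``outer_inds`` are
    # assumed from the public quimb API; exact index names are not asserted.
    #
    #     import quimb.tensor as qtn
    #
    #     psi = qtn.PEPS.rand(3, 3, bond_dim=2, seed=42)
    #     rho_tn = psi.make_reduced_density_matrix([(0, 0), (0, 1)])
    #
    #     # two kept ket indices plus two bra indices remain open, everything
    #     # else is either traced (shared) between the layers or mangled
    #     assert len(rho_tn.outer_inds()) == 4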

    def partial_trace_exact(
        self,
        where,
        optimize="auto-hq",
        normalized=True,
        rehearse=False,
        get="matrix",
        **contract_opts,
    ):
        """Compute the reduced density matrix at sites ``where`` by exactly
        contracting the full overlap tensor network.

        Parameters
        ----------
        where : sequence[node]
            The sites to keep.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            full tensor network.
        normalized : bool or "return", optional
            Whether to normalize the result. If "return", return the norm
            separately.
        rehearse : bool, optional
            Whether to perform the computation or not, if ``True`` return a
            rehearsal info dict.
        get : {'matrix', 'array', 'tensor'}, optional
            Whether to return the result as a dense array, the data itself, or
            a tensor network.

        Returns
        -------
        array or Tensor or dict or (array, float), (Tensor, float)
        """
        k_inds = tuple(map(self.site_ind, where))
        bra_ind_id = "_bra{}"
        b_inds = tuple(map(bra_ind_id.format, where))

        tn = self.make_reduced_density_matrix(where, bra_ind_id=bra_ind_id)

        if rehearse:
            return _handle_rehearse(
                rehearse, tn, optimize, output_inds=k_inds + b_inds
            )

        rho = tn.contract(
            output_inds=(*k_inds, *b_inds),
            optimize=optimize,
            **contract_opts,
        )

        if normalized:
            rho_array_fused = rho.to_dense(k_inds, b_inds)
            nfactor = do("trace", rho_array_fused)
        else:
            rho_array_fused = nfactor = None

        if get == "matrix":
            if rho_array_fused is None:
                # might have computed already
                rho_array_fused = rho.to_dense(k_inds, b_inds)
            if normalized is True:
                # multiply norm in
                rho = rho_array_fused / nfactor
            else:
                rho = rho_array_fused
        elif get == "array":
            if normalized is True:
                # multiply norm in
                rho = rho.data / nfactor
            else:
                rho = rho.data
        elif get == "tensor":
            if normalized is True:
                # multiply norm in, inplace
                rho.multiply_(1 / nfactor)
        else:
            raise ValueError(f"Unrecognized 'get' value: {get}")

        if normalized == "return":
            return rho, nfactor
        else:
            return rho
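
    # Illustrative usage sketch (added commentary, not part of the removed
    # quimb source): the exact two-site reduced density matrix of a small
    # PEPS. ``PEPS.rand`` is assumed from the public quimb API.
    #
    #     import numpy as np
    #     import quimb.tensor as qtn
    #
    #     psi = qtn.PEPS.rand(3, 3, bond_dim=2, seed=0)
    #     rho = psi.partial_trace_exact([(1, 1), (1, 2)])  # fused 4x4 matrix
    #
    #     np.allclose(np.trace(rho), 1.0)   # normalized=True by default
    #     np.allclose(rho, rho.conj().T)    # hermitian up to float error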

    def local_expectation_exact(
        self,
        G,
        where,
        optimize="auto-hq",
        normalized=True,
        rehearse=False,
        **contract_opts,
    ):
        """Compute the local expectation of operator ``G`` at sites ``where``
        by exactly contracting the full overlap tensor network.

        Parameters
        ----------
        G : array_like
            The operator to compute the expectation of.
        where : sequence[node]
            The sites to compute the expectation at.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            full tensor network.
        normalized : bool or "return", optional
            Whether to normalize the result. If "return", return the norm
            separately.
        rehearse : bool, optional
            Whether to perform the computation or not, if ``True`` return a
            rehearsal info dict.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.

        Returns
        -------
        float or (float, float)
        """
        rho = self.partial_trace_exact(
            where=where,
            optimize=optimize,
            rehearse=rehearse,
            normalized=normalized,
            get="array",
            **contract_opts,
        )

        if rehearse:
            # immediately return the info dict
            return rho

        if normalized == "return":
            # separate out the norm
            rho, nfactor = rho

        ng = len(where)
        if do("ndim", G) != 2 * ng:
            # might be supplied in matrix form
            G = do("reshape", G, rho.shape)

        # contract the expectation!
        expec = do(
            "tensordot",
            rho,
            G,
            axes=(range(2 * ng), (*range(ng, 2 * ng), *range(0, ng))),
        )

        if normalized == "return":
            return expec, nfactor
        else:
            return expec
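
    # A minimal numpy check (added commentary, not part of the removed quimb
    # source) of the ``tensordot`` axes convention used above: the ket axes of
    # ``rho`` pair with the column axes of ``G`` and the bra axes with the row
    # axes, which reproduces Tr[rho G] for ``ng`` sites.
    #
    #     import numpy as np
    #
    #     ng, d = 2, 2
    #     rng = np.random.default_rng(0)
    #     rho = rng.normal(size=(d,) * (2 * ng))
    #     G = rng.normal(size=(d,) * (2 * ng))
    #
    #     expec = np.tensordot(
    #         rho, G, axes=(range(2 * ng), (*range(ng, 2 * ng), *range(0, ng)))
    #     )
    #     trace = np.trace(rho.reshape(d**ng, d**ng) @ G.reshape(d**ng, d**ng))
    #     assert np.allclose(expec, trace)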

    def compute_local_expectation_exact(
        self,
        terms,
        optimize="auto-hq",
        *,
        normalized=True,
        return_all=False,
        rehearse=False,
        executor=None,
        progbar=False,
        **contract_opts,
    ):
        """Compute the local expectations of many operators,
        by exactly contracting the full overlap tensor network.

        Parameters
        ----------
        terms : dict[node or (node, node), array_like]
            The terms to compute the expectation of, with keys being the sites
            and values being the local operators.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            full tensor network.
        normalized : bool, optional
            Whether to normalize the result.
        return_all : bool, optional
            Whether to return all results, or just the summed expectation.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computations or not::

                - False: perform the computation.
                - 'tn': return the tensor networks of each local expectation,
                  without running the path optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree`` for each local expectation.
                - True: run the path optimizer and return the ``PathInfo`` for
                  each local expectation.

        executor : Executor, optional
            If supplied compute the terms in parallel using this executor.
        progbar : bool, optional
            Whether to show a progress bar.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.

        Returns
        -------
        expecs : float or dict[node or (node, node), float]
            If ``return_all==False``, return the summed expectation value of
            the given terms. Otherwise, return a dictionary mapping each term's
            location to the expectation value.
        """
        return _compute_expecs_maybe_in_parallel(
            fn=_tn_local_expectation_exact,
            tn=self,
            terms=terms,
            return_all=return_all,
            executor=executor,
            progbar=progbar,
            optimize=optimize,
            normalized=normalized,
            rehearse=rehearse,
            **contract_opts,
        )

    def get_cluster(
        self,
        where,
        gauges=None,
        max_distance=0,
        fillin=0,
        smudge=1e-12,
        power=1.0,
    ):
        """Get the wavefunction cluster tensor network for the sites
        surrounding ``where``, potentially gauging the region with the simple
        update style bond gauges in ``gauges``.

        Parameters
        ----------
        where : sequence[node]
            The sites around which to form the cluster.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            used.
        max_distance : int, optional
            The maximum graph distance to include tensors neighboring ``where``
            when computing the expectation. The default 0 means only the
            tensors at sites ``where`` are used, 1 includes their direct
            neighbors, etc.
        fillin : bool or int, optional
            When selecting the local tensors, whether and how many times to
            'fill-in' corner tensors attached multiple times to the local
            region. On a lattice this fills in the corners. See
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.select_local`.
        smudge : float, optional
            A small value to add to the gauges before multiplying them in and
            inverting them to avoid numerical issues.
        power : float, optional
            The power to raise the singular values to before multiplying them
            in and inverting them.

        Returns
        -------
        TensorNetworkGenVector
        """
        # select a local neighborhood of tensor tids
        tids = self._get_tids_from_tags(
            tuple(map(self.site_tag, where)), "any"
        )

        if len(tids) == 2:
            # connect the sites up
            tids = self.get_path_between_tids(*tids).tids

        # select the local patch!
        k = self._select_local_tids(
            tids,
            max_distance=max_distance,
            fillin=fillin,
            virtual=False,
        )

        if gauges is not None:
            # gauge the region with simple update style bond gauges
            k.gauge_simple_insert(gauges, smudge=smudge, power=power)

        return k
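
    # Illustrative usage sketch (added commentary, not part of the removed
    # quimb source): grabbing the gauged cluster around a bulk site. The
    # cluster is a copy (``virtual=False``), so modifying it leaves the
    # original state untouched. ``PEPS.rand``, ``gauge_all_simple_`` and
    # ``num_tensors`` are assumed from the public quimb API.
    #
    #     import quimb.tensor as qtn
    #
    #     psi = qtn.PEPS.rand(4, 4, bond_dim=3, seed=7)
    #     gauges = {}
    #     psi.gauge_all_simple_(max_iterations=100, tol=1e-6, gauges=gauges)
    #
    #     k = psi.get_cluster([(2, 2)], gauges=gauges, max_distance=1)
    #     k.num_tensors  # the site plus its neighbours, typically 5 in the bulk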

    def partial_trace_cluster(
        self,
        where,
        gauges=None,
        optimize="auto-hq",
        normalized=True,
        max_distance=0,
        fillin=0,
        smudge=1e-12,
        power=1.0,
        get="matrix",
        rehearse=False,
        **contract_opts,
    ):
        """Compute the approximate reduced density matrix at sites ``where`` by
        contracting a local cluster of tensors, potentially gauging the region
        with the simple update style bond gauges in ``gauges``.

        Parameters
        ----------
        where : sequence[node]
            The sites to keep.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            used.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            local tensors.
        normalized : bool or "return", optional
            Whether to normalize the result. If "return", return the norm
            separately.
        max_distance : int, optional
            The maximum graph distance to include tensors neighboring ``where``
            when computing the expectation. The default 0 means only the
            tensors at sites ``where`` are used, 1 includes their direct
            neighbors, etc.
        fillin : bool or int, optional
            When selecting the local tensors, whether and how many times to
            'fill-in' corner tensors attached multiple times to the local
            region. On a lattice this fills in the corners. See
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.select_local`.
        smudge : float, optional
            A small value to add to the gauges before multiplying them in and
            inverting them to avoid numerical issues.
        power : float, optional
            The power to raise the singular values to before multiplying them
            in and inverting them.
        get : {'matrix', 'array', 'tensor'}, optional
            Whether to return the result as a fused matrix (i.e. always 2D),
            unfused array, or still labeled Tensor.
        rehearse : bool, optional
            Whether to perform the computation or not, if ``True`` return a
            rehearsal info dict.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.
        """
        k = self.get_cluster(
            where,
            gauges=gauges,
            max_distance=max_distance,
            fillin=fillin,
            smudge=smudge,
            power=power,
        )

        return k.partial_trace_exact(
            where=where,
            optimize=optimize,
            normalized=normalized,
            rehearse=rehearse,
            get=get,
            **contract_opts,
        )

    def local_expectation_cluster(
        self,
        G,
        where,
        normalized=True,
        max_distance=0,
        fillin=False,
        gauges=None,
        smudge=0.0,
        power=1.0,
        optimize="auto",
        max_bond=None,
        rehearse=False,
        **contract_opts,
    ):
        r"""Approximately compute a single local expectation value of the gate
        ``G`` at sites ``where``, either treating the environment beyond
        ``max_distance`` as the identity, or using simple update style bond
        gauges as supplied in ``gauges``.

        This selects a local neighbourhood of tensors up to distance
        ``max_distance`` away from ``where``, then traces over dangling bonds
        after potentially inserting the bond gauges, to form an approximate
        version of the reduced density matrix.

        .. math::

            \langle \psi | G | \psi \rangle
            \approx
            \frac{
                \mathrm{Tr} [ G \tilde{\rho}_\mathrm{where} ]
            }{
                \mathrm{Tr} [ \tilde{\rho}_\mathrm{where} ]
            }

        assuming ``normalized==True``.

        Parameters
        ----------
        G : array_like
            The gate to compute the expectation of.
        where : node or sequence[node]
            The sites to compute the expectation at.
        normalized : bool, optional
            Whether to locally normalize the result, i.e. divide by the
            expectation value of the identity.
        max_distance : int, optional
            The maximum graph distance to include tensors neighboring ``where``
            when computing the expectation. The default 0 means only the
            tensors at sites ``where`` are used, 1 includes their direct
            neighbors, etc.
        fillin : bool or int, optional
            When selecting the local tensors, whether and how many times to
            'fill-in' corner tensors attached multiple times to the local
            region. On a lattice this fills in the corners. See
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.select_local`.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            used.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            local tensors.
        max_bond : None or int, optional
            If specified, use compressed contraction.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computations or not::

                - False: perform the computation.
                - 'tn': return the tensor networks of each local expectation,
                  without running the path optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree`` for each local expectation.
                - True: run the path optimizer and return the ``PathInfo`` for
                  each local expectation.

        Returns
        -------
        expectation : float
        """
        k = self.get_cluster(
            where,
            gauges=gauges,
            max_distance=max_distance,
            fillin=fillin,
            smudge=smudge,
            power=power,
        )

        if max_bond is not None:
            return k.local_expectation(
                G=G,
                where=where,
                max_bond=max_bond,
                optimize=optimize,
                normalized=normalized,
                rehearse=rehearse,
                **contract_opts,
            )

        return k.local_expectation_exact(
            G=G,
            where=where,
            optimize=optimize,
            normalized=normalized,
            rehearse=rehearse,
            **contract_opts,
        )
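
    # Illustrative usage sketch (added commentary, not part of the removed
    # quimb source): a cheap cluster estimate of a two-site ZZ expectation,
    # using simple update style gauges as the environment. ``qu.pauli`` and
    # ``PEPS.rand`` are assumed from the public quimb API.
    #
    #     import quimb as qu
    #     import quimb.tensor as qtn
    #
    #     psi = qtn.PEPS.rand(4, 4, bond_dim=3, seed=7)
    #     gauges = {}
    #     psi.gauge_all_simple_(max_iterations=100, tol=1e-6, gauges=gauges)
    #
    #     ZZ = qu.pauli("Z") & qu.pauli("Z")
    #     ev = psi.local_expectation_cluster(
    #         ZZ, [(1, 1), (1, 2)], gauges=gauges, max_distance=1
    #     )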

    local_expectation_simple = deprecated(
        local_expectation_cluster,
        "local_expectation_simple",
        "local_expectation_cluster",
    )

    def compute_local_expectation_cluster(
        self,
        terms,
        *,
        max_distance=0,
        fillin=False,
        normalized=True,
        gauges=None,
        optimize="auto",
        max_bond=None,
        return_all=False,
        rehearse=False,
        executor=None,
        progbar=False,
        **contract_opts,
    ):
        r"""Compute all local expectations of the given terms, either treating
        the environment beyond ``max_distance`` as the identity, or using
        simple update style bond gauges as supplied in ``gauges``.

        This selects a local neighbourhood of tensors up to distance
        ``max_distance`` away from each term's sites, then traces over
        dangling bonds after potentially inserting the bond gauges, to form
        an approximate version of the reduced density matrix.

        .. math::

            \sum_\mathrm{i}
            \langle \psi | G_\mathrm{i} | \psi \rangle
            \approx
            \sum_\mathrm{i}
            \frac{
                \mathrm{Tr} [ G_\mathrm{i} \tilde{\rho}_\mathrm{i} ]
            }{
                \mathrm{Tr} [ \tilde{\rho}_\mathrm{i} ]
            }

        assuming ``normalized==True``.

        Parameters
        ----------
        terms : dict[node or (node, node), array_like]
            The terms to compute the expectation of, with keys being the sites
            and values being the local operators.
        max_distance : int, optional
            The maximum graph distance to include tensors neighboring ``where``
            when computing the expectation. The default 0 means only the
            tensors at sites ``where`` are used, 1 includes their direct
            neighbors, etc.
        fillin : bool or int, optional
            When selecting the local tensors, whether and how many times to
            'fill-in' corner tensors attached multiple times to the local
            region. On a lattice this fills in the corners. See
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.select_local`.
        normalized : bool, optional
            Whether to locally normalize the result, i.e. divide by the
            expectation value of the identity. This implies that a different
            normalization factor is used for each term.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            used.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, when exactly contracting the
            local tensors.
        max_bond : None or int, optional
            If specified, use compressed contraction.
        return_all : bool, optional
            Whether to return all results, or just the summed expectation.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computations or not::

                - False: perform the computation.
                - 'tn': return the tensor networks of each local expectation,
                  without running the path optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree`` for each local expectation.
                - True: run the path optimizer and return the ``PathInfo`` for
                  each local expectation.

        executor : Executor, optional
            If supplied compute the terms in parallel using this executor.
        progbar : bool, optional
            Whether to show a progress bar.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.

        Returns
        -------
        expecs : float or dict[node or (node, node), float]
            If ``return_all==False``, return the summed expectation value of
            the given terms. Otherwise, return a dictionary mapping each term's
            location to the expectation value.
        """
        return _compute_expecs_maybe_in_parallel(
            fn=_tn_local_expectation_cluster,
            tn=self,
            terms=terms,
            return_all=return_all,
            executor=executor,
            progbar=progbar,
            normalized=normalized,
            max_distance=max_distance,
            fillin=fillin,
            gauges=gauges,
            optimize=optimize,
            rehearse=rehearse,
            max_bond=max_bond,
            **contract_opts,
        )

    compute_local_expectation_simple = deprecated(
        compute_local_expectation_cluster,
        "compute_local_expectation_simple",
        "compute_local_expectation_cluster",
    )

    def local_expectation_loop_expansion(
        self,
        G,
        where,
        loops=None,
        gauges=None,
        normalized=True,
        optimize="auto",
        intersect=False,
        use_all_starting_paths=False,
        info=None,
        progbar=False,
        **contract_opts,
    ):
        """Compute the expectation of operator ``G`` at site(s) ``where`` by
        expanding the expectation in terms of loops of tensors.

        Parameters
        ----------
        G : array_like
            The operator to compute the expectation of.
        where : node or sequence[node]
            The sites to compute the expectation at.
        loops : None or sequence[NetworkPath], optional
            The loops to use. If an integer, all loops up to and including that
            length will be used if the loop passes through all sites in
            ``where``. If ``None`` the maximum loop length is set as the
            shortest loop found. If an explicit set of loops is given, only
            these loops are considered, but only if they pass through all sites
            in ``where``. ``intersect`` is ignored.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            gauged.
        normalized : bool or "local", optional
            Whether to normalize the result. If "local" each loop term is
            normalized separately. If ``True`` each term is normalized using
            a loop expansion estimate of the norm. If ``False`` no
            normalization is performed.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use.
        info : dict, optional
            A dictionary to store intermediate results in to avoid recomputing
            them. This is useful when computing various expectations with
            different sets of loops. This should only be reused when both the
            tensor network and gauges remain the same.
        intersect : bool, optional
            If ``loops`` is not an explicit set of loops, whether to consider
            self intersecting loops in the search for loops passing through
            ``where``.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.

        Returns
        -------
        expec : scalar
        """
        from quimb.experimental.belief_propagation import RegionGraph

        info = info if info is not None else {}
        info.setdefault("tns", {})
        info.setdefault("expecs", {})

        if isinstance(loops, int):
            max_loop_length = loops
            loops = None
        else:
            max_loop_length = None

        if len(where) == 1:
            (tid,) = self._get_tids_from_tags(where[0])
            paths = [(tid,)]
        elif len(where) == 2:
            (tida,) = self._get_tids_from_tags(where[0])
            (tidb,) = self._get_tids_from_tags(where[1])
            if use_all_starting_paths:
                paths = self.get_path_between_tids(tida, tidb, return_all=True)
            else:
                paths = [self.get_path_between_tids(tida, tidb)]
        else:
            raise NotImplementedError("Only 1 or 2 sites supported.")

        if loops is None:
            # find all loops that pass through local bonds
            loops = tuple(
                self.gen_paths_loops(
                    max_loop_length=max_loop_length,
                    intersect=intersect,
                    paths=paths,
                )
            )

        else:
            # have explicit loop specification, maybe as a larger set ->
            # need to select only the loops covering whole of `where`
            loops = tuple(loops)

            if "lookup" in info and hash(loops) == info["lookup_hash"]:
                # want to share info across different expectations and also
                # different sets of loops -> but if the loop set has changed
                # then "lookup" specifically is probably incomplete
                lookup = info["lookup"]
            else:
                # build cache of which coordinates are in which loop to avoid
                # quadratic loop checking cost every time
                lookup = {}
                tid2site = self._get_tid_to_site_map()
                for loop in loops:
                    for tid in loop.tids:
                        site = tid2site[tid]
                        lookup.setdefault(site, set()).add(loop)

                info["lookup"] = lookup
                info["lookup_hash"] = hash(loops)

            # get all loops which contain *all* sites in `where`
            loops = set.intersection(*(lookup[coo] for coo in where))

        # XXX: for larger intersecting loops the counting is not quite
        # right: subregion intersections are not all generated above?
        rg = RegionGraph(loops, autocomplete=False)

        # make sure the tree contribution is included
        for path0 in paths:
            rg.add_region(path0)

        expecs = []
        norms = []
        counts = []

        if progbar:
            regions = Progbar(rg.regions)
        else:
            regions = rg.regions

        for loop in regions:
            C = rg.get_count(loop)
            if C == 0:
                # redundant loop
                continue

            try:
                # have already computed this term in a different expectation
                # e.g. with different set of loops
                expec_loop, norm_loop = info["expecs"][loop, where]
            except KeyError:
                # get the gauged loop tn
                try:
                    tnl = info["tns"][loop]
                except KeyError:
                    tnl = self.select_path(loop, gauges=gauges)
                    info["tns"][loop] = tnl

                # compute the expectation with exact contraction
                expec_loop, norm_loop = tnl.local_expectation_exact(
                    G,
                    where,
                    normalized="return",
                    optimize=optimize,
                    **contract_opts,
                )
                # store for efficient calls with multiple loop sets
                info["expecs"][loop, where] = expec_loop, norm_loop

            expecs.append(expec_loop)
            norms.append(norm_loop)
            counts.append(C)

        if normalized == "local":
            # each loop expectation is normalized separately
            expec = sum(
                C * expec_loop / norm_loop
                for C, expec_loop, norm_loop in zip(counts, expecs, norms)
            )
        elif normalized == "prod":
            # each term is normalized by an overall normalization factor
            expec = prod(e**C for C, e in zip(counts, expecs))
            norm = prod(n**C for C, n in zip(counts, norms))
            expec = expec / norm
        elif normalized:
            # each term is normalized by a simultaneous normalization factor
            expec = sum(C * e for C, e in zip(counts, expecs))
            norm = sum(C * n for C, n in zip(counts, norms))
            expec = expec / norm
        else:
            # no normalization
            expec = sum(
                C * expec_loop for C, expec_loop in zip(counts, expecs)
            )

        return expec
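
    # Worked arithmetic sketch (added commentary, not part of the removed
    # quimb source) of how the counting numbers ``C`` combine the per-region
    # estimates under the different ``normalized`` modes above. The numbers
    # below are made up purely for illustration.
    #
    #     counts = [1, 1, -1]          # inclusion-exclusion counts from RegionGraph
    #     expecs = [0.42, 0.40, 0.41]  # per-region Tr[G rho~]
    #     norms = [1.10, 1.00, 1.05]   # per-region Tr[rho~]
    #
    #     # normalized="local": sum(C * e / n for C, e, n in zip(counts, expecs, norms))
    #     # normalized=True:    sum(C * e for ...) / sum(C * n for ...)
    #     # normalized=False:   sum(C * e for ...)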

    def compute_local_expectation_loop_expansion(
        self,
        terms,
        loops=None,
        *,
        gauges=None,
        normalized=True,
        optimize="auto",
        info=None,
        intersect=False,
        return_all=False,
        executor=None,
        progbar=False,
        **contract_opts,
    ):
        info = info if info is not None else {}

        return _compute_expecs_maybe_in_parallel(
            fn=_tn_local_expectation_loop_expansion,
            tn=self,
            terms=terms,
            loops=loops,
            intersect=intersect,
            return_all=return_all,
            executor=executor,
            progbar=progbar,
            normalized=normalized,
            gauges=gauges,
            optimize=optimize,
            info=info,
            **contract_opts,
        )

    def local_expectation_cluster_expansion(
        self,
        G,
        where,
        clusters=None,
        gauges=None,
        normalized=True,
        autocomplete=True,
        autoreduce=True,
        optimize="auto",
        info=None,
        **contract_opts,
    ):
        """Compute the expectation of operator ``G`` at site(s) ``where`` by
        expanding the expectation in terms of clusters of tensors.

        Parameters
        ----------
        G : array_like
            The operator to compute the expectation of.
        where : node or sequence[node]
            The sites to compute the expectation at.
        clusters : None or sequence[sequence[node]], optional
            The clusters to use. If an integer, all clusters up to and including
            that size will be used if the cluster contains all sites in
            ``where``. If ``None`` the maximum cluster size is set as the
            smallest non-trivial cluster (2-connected subgraph) found.
            If an explicit set of clusters is given, only these clusters are
            considered, but only if they contain all sites in ``where``.
        gauges : dict[str, array_like], optional
            The store of gauge bonds, the keys being indices and the values
            being the vectors. Only bonds present in this dictionary will be
            gauged.
        normalized : bool or "local", optional
            Whether to normalize the result. If "local" each cluster term is
            normalized separately. If ``True`` each term is normalized using
            a loop expansion estimate of the norm. If ``False`` no
            normalization is performed.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use.
        info : dict, optional
            A dictionary to store intermediate results in to avoid recomputing
            them. This is useful when computing various expectations with
            different sets of loops. This should only be reused when both the
            tensor network and gauges remain the same.
        contract_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.

        Returns
        -------
        expec : scalar
        """
        from quimb.experimental.belief_propagation import RegionGraph

        info = info if info is not None else {}
        info.setdefault("tns", {})
        info.setdefault("expecs", {})

        if isinstance(clusters, int):
            max_cluster_size = clusters
            clusters = None
        else:
            max_cluster_size = None

        if clusters is None:
            clusters = tuple(
                self.gen_regions_sites(
                    max_region_size=max_cluster_size,
                    sites=where,
                )
            )
        else:
            clusters = tuple(clusters)

        if not clusters:
            # always include base sites
            clusters = (tuple(where),)

        if "lookup" in info and hash(clusters) == info["lookup_hash"]:
            # want to share info across different expectations and also
            # different sets of clusters -> but if the cluster set has
            # changed then "lookup" specifically is probably incomplete
            lookup = info["lookup"]
        else:
            # build cache of which coordinates are in which cluster to
            # avoid quadratic cluster checking cost every time
            lookup = {}
            for cluster in clusters:
                for site in cluster:
                    lookup.setdefault(site, set()).add(cluster)

            info["lookup"] = lookup
            info["lookup_hash"] = hash(clusters)

        # get all clusters which contain *all* sites in `where`
        clusters = set.intersection(*(lookup[coo] for coo in where))

        if autoreduce:
            neighbors = self.get_site_neighbor_map()
        else:
            neighbors = None

        rg = RegionGraph(clusters, autocomplete=autocomplete)

        # make sure the tree contribution is included
        rg.add_region(where)

        expecs = []
        norms = []
        counts = []

        for cluster in rg.regions:
            C = rg.get_count(cluster)
            if C == 0:
                # redundant cluster
                continue

            if autoreduce:
                # check if we can map cluster to a smaller generalized loop
                cluster = region_remove_dangling(cluster, neighbors, where)

            try:
                # have already computed this term in a different expectation
                # e.g. with different set of clusters
                expec_cluster, norm_cluster = info["expecs"][cluster, where]
            except KeyError:
                # get the gauged cluster tn
                try:
                    tnl = info["tns"][cluster]
                except KeyError:
                    tags = tuple(map(self.site_tag, cluster))
                    # take copy as inserting gauges
                    tnl = self.select_any(tags, virtual=False)
                    tnl.gauge_simple_insert(gauges)
                    info["tns"][cluster] = tnl

                # compute the expectation with exact contraction
                expec_cluster, norm_cluster = tnl.local_expectation_exact(
                    G,
                    where,
                    normalized="return",
                    optimize=optimize,
                    **contract_opts,
                )
                # store for efficient calls with multiple cluster sets
                info["expecs"][cluster, where] = expec_cluster, norm_cluster

            expecs.append(expec_cluster)
            norms.append(norm_cluster)
            counts.append(C)

        if normalized == "local":
            # each loop expectation is normalized separately
            expec = sum(C * e / n for C, e, n in zip(counts, expecs, norms))
        elif normalized == "prod":
            expec = prod(e**C for C, e in zip(counts, expecs))
            norm = prod(n**C for C, n in zip(counts, norms))
            expec = expec / norm
        elif normalized:
            # each term is normalized by a simultaneous normalization factor
            expec = sum(C * e for C, e in zip(counts, expecs))
            norm = sum(C * n for C, n in zip(counts, norms))
            expec = expec / norm
        else:
            # no normalization
            expec = sum(C * e for C, e in zip(counts, expecs))

        return expec
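
    # Illustrative sketch (added commentary, not part of the removed quimb
    # source) of the region counting used above: ``RegionGraph`` assigns each
    # region an inclusion-exclusion count so that overlapping clusters are not
    # double counted. ``RegionGraph`` is the experimental class imported above;
    # exact region representations and counts are not asserted here.
    #
    #     from quimb.experimental.belief_propagation import RegionGraph
    #
    #     rg = RegionGraph([(0, 1, 2), (1, 2, 3)], autocomplete=True)
    #     for region in rg.regions:
    #         print(region, rg.get_count(region))
    #
    #     # the intent: the two three-site clusters each enter with count +1,
    #     # while their shared two-site intersection enters with count -1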

    def norm_cluster_expansion(
        self,
        clusters=None,
        autocomplete=False,
        autoreduce=True,
        gauges=None,
        optimize="auto",
        progbar=False,
        **contract_opts,
    ):
        """Compute the norm of this tensor network by expanding it in terms of
        clusters of tensors.
        """
        from quimb.experimental.belief_propagation import RegionGraph

        if isinstance(clusters, int):
            max_cluster_size = clusters
            clusters = None
        else:
            max_cluster_size = None

        if clusters is None:
            clusters = tuple(
                self.gen_regions_sites(max_region_size=max_cluster_size)
            )
        else:
            clusters = tuple(clusters)

        psi = self.copy()

        # make all tree like norms 1.0 -> region intersections
        # which are tree like can thus be ignored
        nfactor = psi.normalize_simple(gauges)

        if autoreduce:
            neighbors = self.get_site_neighbor_map()
        else:
            neighbors = None

        rg = RegionGraph(clusters, autocomplete=autocomplete)
        for site in psi.sites:
            if site not in rg.lookup:
                # site is not covered by any cluster -> might be tree like
                rg.add_region({site})

        if progbar:
            regions = Progbar(rg.regions)
        else:
            regions = rg.regions

        local_norms = []
        for region in regions:
            C = rg.get_count(region)
            if C == 0:
                continue

            if autoreduce:
                # check if we can map cluster to a smaller generalized loop
                region = region_remove_dangling(region, neighbors)

            if not region:
                # region is tree like -> contributes 1.0
                continue

            tags = tuple(map(psi.site_tag, region))
            kr = psi.select(tags, which="any", virtual=False)
            kr.gauge_simple_insert(gauges)

            lni = (kr.H | kr).contract(optimize=optimize, **contract_opts)
            local_norms.append(do("log10", lni) * C)

        return (10 ** sum(local_norms) * nfactor) ** 0.5

    def compute_local_expectation_cluster_expansion(
        self,
        terms,
        clusters=None,
        *,
        gauges=None,
        normalized=True,
        autocomplete=True,
        optimize="auto",
        info=None,
        return_all=False,
        executor=None,
        progbar=False,
        **contract_opts,
    ):
        info = info if info is not None else {}

        if normalized == "global":
            nfactor = self.norm_cluster_expansion(
                clusters=clusters,
                autocomplete=autocomplete,
                gauges=gauges,
                optimize=optimize,
                **contract_opts,
            )
            tn = self / nfactor
            normalized = False
        else:
            tn = self

        return _compute_expecs_maybe_in_parallel(
            fn=_tn_local_expectation_cluster_expansion,
            tn=tn,
            terms=terms,
            clusters=clusters,
            return_all=return_all,
            executor=executor,
            progbar=progbar,
            normalized=normalized,
            gauges=gauges,
            autocomplete=autocomplete,
            optimize=optimize,
            info=info,
            **contract_opts,
        )

    def partial_trace(
        self,
        keep,
        max_bond,
        optimize,
        flatten=True,
        reduce=False,
        normalized=True,
        symmetrized="auto",
        rehearse=False,
        method="contract_compressed",
        **contract_compressed_opts,
    ):
        """Partially trace this tensor network state, keeping only the sites in
        ``keep``, using compressed contraction.

        Parameters
        ----------
        keep : iterable of hashable
            The sites to keep.
        max_bond : int
            The maximum bond dimension to use while compressed contracting.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, should specifically generate
            contraction paths designed for compressed contraction.
        flatten : {False, True, 'all'}, optional
            Whether to force 'flattening' (contracting all physical indices) of
            the tensor network before contraction, whilst this makes the TN
            generally more complex to contract, the accuracy is usually
            improved. If ``'all'`` also flatten the tensors in ``keep``.
        reduce : bool, optional
            Whether to first 'pull' the physical indices off their respective
            tensors using QR reduction. Experimental.
        normalized : bool, optional
            Whether to normalize the reduced density matrix at the end.
        symmetrized : {'auto', True, False}, optional
            Whether to symmetrize the reduced density matrix at the end. This
            should be unnecessary if ``flatten`` is set to ``True``.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computation or not::

                - False: perform the computation.
                - 'tn': return the tensor network without running the path
                  optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree``.
                - True: run the path optimizer and return the ``PathInfo``.

        contract_compressed_opts : dict, optional
            Additional keyword arguments to pass to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract_compressed`.

        Returns
        -------
        rho : array_like
            The reduced density matrix of sites in ``keep``.
        """
        if symmetrized == "auto":
            symmetrized = not flatten

        # form the partial trace
        k_inds = tuple(map(self.site_ind, keep))

        k = self.copy()
        if reduce:
            k.reduce_inds_onto_bond(*k_inds, tags="__BOND__", drop_tags=True)

        # b = k.conj().reindex_(dict(zip(k_inds, b_inds)))
        # tn = (b | k)
        bra_ind_id = "_bra{}"
        b_inds = tuple(map(bra_ind_id.format, keep))
        tn = k.make_reduced_density_matrix(keep, bra_ind_id=bra_ind_id)
        output_inds = k_inds + b_inds

        if flatten:
            for site in self.gen_site_coos():
                if (site not in keep) or (flatten == "all"):
                    # check if site exists still to permit e.g. local methods
                    # to use this same logic
                    tag = tn.site_tag(site)
                    if tag in tn.tag_map:
                        tn ^= tag

            tn.fuse_multibonds_()

        if method == "contract_compressed":
            if reduce:
                output_inds = None
                tn, tn_reduced = tn.partition("__BOND__", inplace=True)

            if rehearse:
                return _handle_rehearse(
                    rehearse, tn, optimize, output_inds=output_inds
                )

            t_rho = tn.contract_compressed(
                optimize,
                max_bond=max_bond,
                output_inds=output_inds,
                **contract_compressed_opts,
            )

            if reduce:
                t_rho |= tn_reduced

            rho = t_rho.to_dense(k_inds, b_inds)

        elif method == "contract_around":
            tn.contract_around_(
                tuple(map(self.site_tag, keep)),
                "any",
                max_bond=max_bond,
                **contract_compressed_opts,
            )

            if rehearse:
                return _handle_rehearse(
                    rehearse, tn, optimize, output_inds=output_inds
                )

            rho = tn.to_dense(
                k_inds,
                b_inds,
                optimize=optimize,
            )

        else:
            raise ValueError(f"Unknown method: {method}.")

        if symmetrized:
            rho = (rho + dag(rho)) / 2

        if normalized:
            rho = rho / do("trace", rho)

        return rho

    def local_expectation(
        self,
        G,
        where,
        max_bond,
        optimize,
        flatten=True,
        normalized=True,
        symmetrized="auto",
        reduce=False,
        rehearse=False,
        **contract_compressed_opts,
    ):
        """Compute the local expectation of operator ``G`` at site(s) ``where``
        by approximately contracting the full overlap tensor network.

        Parameters
        ----------
        G : array_like
            The local operator to compute the expectation of.
        where : node or sequence of nodes
            The sites to compute the expectation for.
        max_bond : int
            The maximum bond dimension to use while compressed contracting.
        optimize : str or PathOptimizer, optional
            The contraction path optimizer to use, should specifically generate
            contraction paths designed for compressed contraction.
        method : {'rho', 'rho-reduced'}, optional
            The method to use to compute the expectation value.
        flatten : bool, optional
            Whether to force 'flattening' (contracting all physical indices) of
            the tensor network before contraction, whilst this makes the TN
            generally more complex to contract, the accuracy is usually much
            improved.
        normalized : bool, optional
            If computing via `partial_trace`, whether to normalize the reduced
            density matrix at the end.
        symmetrized : {'auto', True, False}, optional
            If computing via `partial_trace`, whether to symmetrize the reduced
            density matrix at the end. This should be unnecessary if ``flatten``
            is set to ``True``.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computation or not::

                - False: perform the computation.
                - 'tn': return the tensor network without running the path
                  optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree``.
                - True: run the path optimizer and return the ``PathInfo``.

        contract_compressed_opts : dict, optional
            Additional keyword arguments to pass to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract_compressed`.

        Returns
        -------
        expec : float
        """
        rho = self.partial_trace(
            keep=where,
            max_bond=max_bond,
            optimize=optimize,
            flatten=flatten,
            reduce=reduce,
            normalized=normalized,
            symmetrized=symmetrized,
            rehearse=rehearse,
            **contract_compressed_opts,
        )
        if rehearse:
            return rho

        return do("tensordot", rho, G, axes=((0, 1), (1, 0)))
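
    # Illustrative usage sketch (added commentary, not part of the removed
    # quimb source): a compressed-contraction estimate of a two-site
    # expectation, controlled by ``max_bond``. The docstring above recommends
    # an optimizer tuned for compressed contraction; the preset string used
    # here is only a placeholder assumption.
    #
    #     import quimb as qu
    #     import quimb.tensor as qtn
    #
    #     psi = qtn.PEPS.rand(4, 4, bond_dim=3, seed=7)
    #     ZZ = qu.pauli("Z") & qu.pauli("Z")
    #     ev = psi.local_expectation(
    #         ZZ, [(1, 1), (1, 2)], max_bond=32, optimize="auto-hq"
    #     )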

    def compute_local_expectation(
        self,
        terms,
        max_bond,
        optimize,
        *,
        flatten=True,
        normalized=True,
        symmetrized="auto",
        reduce=False,
        return_all=False,
        rehearse=False,
        executor=None,
        progbar=False,
        **contract_compressed_opts,
    ):
        """Compute the local expectations of many local operators, by
        approximately contracting the full overlap tensor network.

        Parameters
        ----------
        terms : dict[node or (node, node), array_like]
            The terms to compute the expectation of, with keys being the sites
            and values being the local operators.
        max_bond : int
            The maximum bond dimension to use during contraction.
        optimize : str or PathOptimizer
            The compressed contraction path optimizer to use.
        method : {'rho', 'rho-reduced'}, optional
            The method to use to compute the expectation value.

            - 'rho': compute the expectation value via the reduced density
              matrix.
            - 'rho-reduced': compute the expectation value via the reduced
              density matrix, having reduced the physical indices onto the
              bonds first.

        flatten : bool, optional
            Whether to force 'flattening' (contracting all physical indices) of
            the tensor network before contraction, whilst this makes the TN
            generally more complex to contract, the accuracy can often be much
            improved.
        normalized : bool, optional
            Whether to locally normalize the result.
        symmetrized : {'auto', True, False}, optional
            Whether to symmetrize the reduced density matrix at the end. This
            should be unnecessary if ``flatten`` is set to ``True``.
        return_all : bool, optional
            Whether to return all results, or just the summed expectation. If
            ``rehearse is not False``, this is ignored and a dict is always
            returned.
        rehearse : {False, 'tn', 'tree', True}, optional
            Whether to perform the computations or not::

                - False: perform the computation.
                - 'tn': return the tensor networks of each local expectation,
                  without running the path optimizer.
                - 'tree': run the path optimizer and return the
                  ``cotengra.ContractionTree`` for each local expectation.
                - True: run the path optimizer and return the ``PathInfo`` for
                  each local expectation.

        executor : Executor, optional
            If supplied compute the terms in parallel using this executor.
        progbar : bool, optional
            Whether to show a progress bar.
        contract_compressed_opts
            Supplied to
            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract_compressed`.

        Returns
        -------
        expecs : float or dict[node or (node, node), float]
            If ``return_all==False``, return the summed expectation value of
            the given terms. Otherwise, return a dictionary mapping each term's
            location to the expectation value.
        """
        return _compute_expecs_maybe_in_parallel(
            fn=_tn_local_expectation,
            tn=self,
            terms=terms,
            return_all=return_all,
            executor=executor,
            progbar=progbar,
            max_bond=max_bond,
            optimize=optimize,
            normalized=normalized,
            symmetrized=symmetrized,
            reduce=reduce,
            flatten=flatten,
            rehearse=rehearse,
            **contract_compressed_opts,
        )

    compute_local_expectation_rehearse = functools.partialmethod(
        compute_local_expectation, rehearse=True
    )

    compute_local_expectation_tn = functools.partialmethod(
        compute_local_expectation, rehearse="tn"
    )
|
|
2724
|
-
|
|
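For orientation, here is a minimal usage sketch of the removed ``compute_local_expectation`` API. It is not part of the diff; the ``edges_2d_square`` / ``TN_from_edges_rand`` builders from ``quimb.tensor`` are simply an illustrative way to obtain a generic-geometry state.

```python
# Sketch only: assumes quimb and its tensor module are importable.
import quimb as qu
import quimb.tensor as qtn

# a random state on a 3x3 square-lattice graph (illustrative construction)
edges = qtn.edges_2d_square(3, 3)
psi = qtn.TN_from_edges_rand(edges, D=2, phys_dim=2, seed=42)

# single-site Pauli-Z terms, keyed by site as described in the docstring above
terms = {site: qu.pauli("Z") for site in psi.sites}

# summed expectation value, compressing to max_bond during contraction
ez_total = psi.compute_local_expectation(
    terms, max_bond=16, optimize="auto-hq", normalized=True
)

# or inspect the uncontracted networks per term before committing to a run
tns = psi.compute_local_expectation_tn(terms, max_bond=16, optimize="auto-hq")
```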
(removed in 0.0.2, lines 2725-2838)

-    def sample_configuration_cluster(
-        self,
-        gauges=None,
-        max_distance=0,
-        fillin=0,
-        max_iterations=100,
-        tol=5e-6,
-        optimize="auto-hq",
-        seed=None,
-    ):
-        """Sample a configuration for this state using the simple update or
-        cluster style environment approximation. The algorithm proceeds as
-        a decimation:
-
-        1. Compute every remaining site's local density matrix.
-        2. The site with the largest bias is sampled.
-        3. The site is projected into this sampled local config.
-        4. The state is regauged given the projection.
-        5. Repeat until all sites are sampled.
-
-        Parameters
-        ----------
-        gauges : dict[str, array_like], optional
-            The store of gauge bonds, the keys being indices and the values
-            being the vectors. Only bonds present in this dictionary will be
-            gauged.
-        max_distance : int, optional
-            The maximum distance to consider when computing the local density
-            matrix for each site. Zero meaning on the site itself, 1 meaning
-            the site and its immediate neighbors, etc.
-        fillin : bool or int, optional
-            When selecting the local tensors, whether and how many times to
-            'fill-in' corner tensors attached multiple times to the local
-            region. On a lattice this fills in the corners. See
-            :meth:`~quimb.tensor.tensor_core.TensorNetwork.select_local`.
-        max_iterations : int, optional
-            The maximum number of iterations to perform when gauging the state.
-        tol : float, optional
-            The tolerance to converge to when gauging the state.
-        optimize : str or PathOptimizer, optional
-            The contraction path optimizer to use.
-        seed : None, int or np.random.Generator, optional
-            A random seed or random number generator to use.
-
-        Returns
-        -------
-        config : dict[Node, int]
-            The sampled configuration.
-        omega : float
-            The probability of the sampled configuration in terms of the
-            approximate distribution induced by the cluster scheme.
-        """
-        import numpy as np
-
-        rng = np.random.default_rng(seed)
-
-        psi = self.copy()
-        gauges = gauges.copy() if gauges is not None else {}
-
-        # do an initial equilibration of the bond gauges
-        psi.gauge_all_simple_(
-            max_iterations=max_iterations,
-            tol=tol,
-            gauges=gauges,
-        )
-
-        config = {}
-        omega = 1.0
-        remaining = set(psi.sites)
-
-        while remaining:
-            probs = {}
-
-            for i in remaining:
-                # get the approx local density matrix for each remaining site
-                rhoi = psi.partial_trace_cluster(
-                    where=(i,),
-                    gauges=gauges,
-                    max_distance=max_distance,
-                    fillin=fillin,
-                    optimize=optimize,
-                    normalized=False,
-                )
-                # extract the diagonal
-                rhoi = do("to_numpy", rhoi)
-                pi = np.diag(rhoi).real
-                # normalize and store
-                probs[i] = pi / pi.sum()
-
-            # choose the site with maximum bias to sample next
-            i = max(probs, key=lambda i: np.max(probs[i]))
-            remaining.remove(i)
-            # get the local prob distribution at that site
-            pmax = probs[i]
-            # sample a local config according to the distribution
-            xi = rng.choice(pmax.size, p=pmax)
-            config[i] = xi
-            # track local probability
-            omega *= pmax[xi]
-            # project the site into the sampled state
-            psi.isel_({psi.site_ind(i): xi})
-            # get the local tid, to efficiently restart the gauging
-            tids = psi._get_tids_from_tags(i)
-            # regauge, given the projected site
-            psi.gauge_all_simple_(
-                max_iterations=max_iterations,
-                tol=tol,
-                gauges=gauges,
-                touched_tids=tids,
-            )
-
-        return config, omega
-
-
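The decimation loop above returns both a configuration and its weight under the approximate cluster distribution. A hedged sketch of drawing a few weighted samples, reusing the ``psi`` constructed in the previous example:

```python
# Sketch only: repeated cluster-style sampling; each call copies the state
# internally, so the loop does not modify ``psi``.
samples = []
for k in range(10):
    config, omega = psi.sample_configuration_cluster(
        max_distance=1,      # include nearest neighbours in each local rho
        max_iterations=100,
        tol=5e-6,
        seed=k,
    )
    # config maps each site to its sampled local index; omega is the product
    # of the local probabilities used at each decimation step
    samples.append((config, omega))
```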
(removed in 0.0.2, lines 2839-2915)

-class TensorNetworkGenOperator(TensorNetworkGen):
-    """A tensor network which notionally has a single tensor and two outer
-    indices per 'site', though these could be labelled arbitrarily and could
-    also be linked in an arbitrary geometry by bonds. By convention, if
-    converted to a dense matrix, the 'upper' indices would be on the left and
-    the 'lower' indices on the right.
-    """
-
-    _EXTRA_PROPS = (
-        "_sites",
-        "_site_tag_id",
-        "_upper_ind_id",
-        "_lower_ind_id",
-    )
-
-    @property
-    def upper_ind_id(self):
-        """The string specifier for the upper physical indices."""
-        return self._upper_ind_id
-
-    def upper_ind(self, site):
-        """Get the upper physical index name of ``site``."""
-        return self.upper_ind_id.format(site)
-
-    def reindex_upper_sites(self, new_id, where=None, inplace=False):
-        """Modify the upper site indices for all or some tensors in this
-        operator tensor network (without changing the ``upper_ind_id``).
-
-        Parameters
-        ----------
-        new_id : str
-            A string with a format placeholder to accept a site, e.g. "up{}".
-        where : None or sequence
-            Which sites to update the index labels on. If ``None`` (default)
-            all sites.
-        inplace : bool
-            Whether to reindex in place.
-        """
-        if where is None:
-            where = self.gen_sites_present()
-
-        return self.reindex(
-            {self.upper_ind(x): new_id.format(x) for x in where},
-            inplace=inplace,
-        )
-
-    reindex_upper_sites_ = functools.partialmethod(
-        reindex_upper_sites, inplace=True
-    )
-
-    @upper_ind_id.setter
-    def upper_ind_id(self, new_id):
-        if new_id == self._lower_ind_id:
-            raise ValueError(
-                "Setting the same upper and lower index ids will"
-                " make the two ambiguous."
-            )
-
-        if self._upper_ind_id != new_id:
-            self.reindex_upper_sites_(new_id)
-            self._upper_ind_id = new_id
-            self._upper_inds = None
-
-    @property
-    def upper_inds(self):
-        """Return a tuple of all upper indices."""
-        if getattr(self, "_upper_inds", None) is None:
-            self._upper_inds = tuple(map(self.upper_ind, self.gen_site_coos()))
-        return self._upper_inds
-
-    @property
-    def upper_inds_present(self):
-        """Return a tuple of all upper indices still present in the tensor
-        network.
-        """
-        return tuple(map(self.upper_ind, self.gen_sites_present()))
-
(removed in 0.0.2, lines 2916-2977)

-    @property
-    def lower_ind_id(self):
-        """The string specifier for the lower physical indices."""
-        return self._lower_ind_id
-
-    def lower_ind(self, site):
-        """Get the lower physical index name of ``site``."""
-        return self.lower_ind_id.format(site)
-
-    def reindex_lower_sites(self, new_id, where=None, inplace=False):
-        """Modify the lower site indices for all or some tensors in this
-        operator tensor network (without changing the ``lower_ind_id``).
-
-        Parameters
-        ----------
-        new_id : str
-            A string with a format placeholder to accept a site, e.g. "up{}".
-        where : None or sequence
-            Which sites to update the index labels on. If ``None`` (default)
-            all sites.
-        inplace : bool
-            Whether to reindex in place.
-        """
-        if where is None:
-            where = self.gen_sites_present()
-
-        return self.reindex(
-            {self.lower_ind(x): new_id.format(x) for x in where},
-            inplace=inplace,
-        )
-
-    reindex_lower_sites_ = functools.partialmethod(
-        reindex_lower_sites, inplace=True
-    )
-
-    @lower_ind_id.setter
-    def lower_ind_id(self, new_id):
-        if new_id == self._upper_ind_id:
-            raise ValueError(
-                "Setting the same upper and lower index ids will"
-                " make the two ambiguous."
-            )
-
-        if self._lower_ind_id != new_id:
-            self.reindex_lower_sites_(new_id)
-            self._lower_ind_id = new_id
-            self._lower_inds = None
-
-    @property
-    def lower_inds(self):
-        """Return a tuple of all lower indices."""
-        if getattr(self, "_lower_inds", None) is None:
-            self._lower_inds = tuple(map(self.lower_ind, self.gen_site_coos()))
-        return self._lower_inds
-
-    @property
-    def lower_inds_present(self):
-        """Return a tuple of all lower indices still present in the tensor
-        network.
-        """
-        return tuple(map(self.lower_ind, self.gen_sites_present()))
-
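The upper/lower index machinery above amounts to per-site formatting and renaming of index labels. A small sketch, for some hypothetical operator tensor network ``A`` following the conventions documented above (how ``A`` is constructed is outside this diff):

```python
# Sketch only: ``A`` is a hypothetical TensorNetworkGenOperator instance.
print(A.upper_ind_id, A.lower_ind_id)        # e.g. 'k{}' and 'b{}'

site = next(iter(A.gen_sites_present()))
print(A.upper_ind(site), A.lower_ind(site))  # e.g. 'k0' and 'b0'

# switching the id reindexes every tensor and keeps the property consistent
A.upper_ind_id = "u{}"

# reindex_upper_sites_ renames the indices *without* changing the id, e.g. to
# expose them temporarily for contraction against another network
B = A.copy()
B.reindex_upper_sites_("tmp{}")
```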
(removed in 0.0.2, lines 2978-3017)

-    def to_dense(self, *inds_seq, to_qarray=False, **contract_opts):
-        """Contract this tensor network 'operator' into a dense array.
-
-        Parameters
-        ----------
-        inds_seq : sequence of sequences of str
-            How to group the site indices into the dense array. By default,
-            use a single group ordered like ``sites``, but only containing
-            those sites which are still present.
-        to_qarray : bool
-            Whether to turn the dense array into a ``qarray``, if the backend
-            would otherwise be ``'numpy'``.
-        contract_opts
-            Options to pass to
-            :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.
-
-        Returns
-        -------
-        array
-        """
-        if not inds_seq:
-            inds_seq = (self.upper_inds_present, self.lower_inds_present)
-
-        return TensorNetwork.to_dense(
-            self, *inds_seq, to_qarray=to_qarray, **contract_opts
-        )
-
-    to_qarray = functools.partialmethod(to_dense, to_qarray=True)
-
-    def phys_dim(self, site=None, which="upper"):
-        """Get the physical dimension of ``site``."""
-        if site is None:
-            site = next(iter(self.gen_sites_present()))
-
-        if which == "upper":
-            return self[site].ind_size(self.upper_ind(site))
-
-        if which == "lower":
-            return self[site].ind_size(self.lower_ind(site))
-
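``to_dense`` groups the upper indices first and the lower indices second by default, matching the matrix convention in the class docstring. A hedged shape check for the same hypothetical small operator ``A``:

```python
# Sketch only: densify a small hypothetical operator TN ``A``.
import numpy as np

d = A.phys_dim()                      # local dimension, read off one site
n = len(tuple(A.gen_sites_present()))

M = A.to_dense()                      # rows: upper inds, columns: lower inds
assert M.shape == (d**n, d**n)

Mq = A.to_qarray()                    # same contraction, wrapped as a qarray
assert np.allclose(M, Mq)
```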
(removed in 0.0.2, lines 3018-3159)

-    def gate_upper_with_op_lazy(
-        self,
-        A,
-        transpose=False,
-        inplace=False,
-    ):
-        r"""Act lazily with the operator tensor network ``A``, which should
-        have matching structure, on this operator tensor network (``B``), like
-        ``A @ B``. The returned tensor network will have the same structure as
-        this one, but with the operator gated in lazily, i.e. uncontracted.
-
-        .. math::
-
-            B \rightarrow A B
-
-        or (if ``transpose=True``):
-
-        .. math::
-
-            B \rightarrow A^T B
-
-        Parameters
-        ----------
-        A : TensorNetworkGenOperator
-            The operator tensor network to gate with, or apply to this tensor
-            network.
-        transpose : bool, optional
-            Whether to contract the lower or upper indices of ``A`` with the
-            upper indices of ``B``. If ``False`` (the default), the lower
-            indices of ``A`` will be contracted with the upper indices of
-            ``B``, if ``True`` the upper indices of ``A`` will be
-            contracted with the upper indices of ``B``, which is like applying
-            the transpose first.
-        inplace : bool, optional
-            Whether to perform the gate operation inplace on this tensor
-            network.
-
-        Returns
-        -------
-        TensorNetworkGenOperator
-        """
-        return tensor_network_apply_op_op(
-            A=A,
-            B=self,
-            which_A="upper" if transpose else "lower",
-            which_B="upper",
-            contract=False,
-            inplace=inplace,
-        )
-
-    gate_upper_with_op_lazy_ = functools.partialmethod(
-        gate_upper_with_op_lazy, inplace=True
-    )
-
-    def gate_lower_with_op_lazy(
-        self,
-        A,
-        transpose=False,
-        inplace=False,
-    ):
-        r"""Act lazily 'from the right' with the operator tensor network ``A``,
-        which should have matching structure, on this operator tensor network
-        (``B``), like ``B @ A``. The returned tensor network will have the same
-        structure as this one, but with the operator gated in lazily, i.e.
-        uncontracted.
-
-        .. math::
-
-            B \rightarrow B A
-
-        or (if ``transpose=True``):
-
-        .. math::
-
-            B \rightarrow B A^T
-
-        Parameters
-        ----------
-        A : TensorNetworkGenOperator
-            The operator tensor network to gate with, or apply to this tensor
-            network.
-        transpose : bool, optional
-            Whether to contract the upper or lower indices of ``A`` with the
-            lower indices of this TN. If ``False`` (the default), the upper
-            indices of ``A`` will be contracted with the lower indices of
-            ``B``, if ``True`` the lower indices of ``A`` will be contracted
-            with the lower indices of this TN, which is like applying the
-            transpose first.
-        inplace : bool, optional
-            Whether to perform the gate operation inplace on this tensor
-            network.
-
-        Returns
-        -------
-        TensorNetworkGenOperator
-        """
-        return tensor_network_apply_op_op(
-            B=self,
-            A=A,
-            which_A="lower" if transpose else "upper",
-            which_B="lower",
-            contract=False,
-            inplace=inplace,
-        )
-
-    gate_lower_with_op_lazy_ = functools.partialmethod(
-        gate_lower_with_op_lazy, inplace=True
-    )
-
-    def gate_sandwich_with_op_lazy(
-        self,
-        A,
-        inplace=False,
-    ):
-        r"""Act lazily with the operator tensor network ``A``, which should
-        have matching structure, on this operator tensor network (``B``), like
-        :math:`B \rightarrow A B A^\dagger`. The returned tensor network will
-        have the same structure as this one, but with the operator gated in
-        lazily, i.e. uncontracted.
-
-        Parameters
-        ----------
-        A : TensorNetworkGenOperator
-            The operator tensor network to gate with, or apply to this tensor
-            network.
-        inplace : bool, optional
-            Whether to perform the gate operation inplace on this tensor
-            network.
-
-        Returns
-        -------
-        TensorNetworkGenOperator
-        """
-        B = self if inplace else self.copy()
-        B.gate_upper_with_op_lazy_(A)
-        B.gate_lower_with_op_lazy_(A.conj(), transpose=True)
-        return B
-
-    gate_sandwich_with_op_lazy_ = functools.partialmethod(
-        gate_sandwich_with_op_lazy, inplace=True
-    )
-
-
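The three lazy gating methods above compose operator networks without contracting anything. A hedged sketch for two matching hypothetical operators ``A`` and ``B``:

```python
# Sketch only: lazy composition of matching hypothetical operator TNs A and B.
AB = B.gate_upper_with_op_lazy(A)        # represents A @ B
BA = B.gate_lower_with_op_lazy(A)        # represents B @ A
ABAd = B.gate_sandwich_with_op_lazy(A)   # represents A @ B @ A^dagger

# nothing has been contracted yet: each result is a larger TN with the same
# outer structure, which can still be compressed, or densified for a check
M = AB.to_dense()
```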
(removed in 0.0.2, lines 3160-3176)

-def _handle_rehearse(rehearse, tn, optimize, **kwargs):
-    if rehearse is True:
-        tree = tn.contraction_tree(optimize, **kwargs)
-        return {
-            "tn": tn,
-            "tree": tree,
-            "W": tree.contraction_width(log=2),
-            "C": tree.contraction_cost(log=10),
-        }
-    if rehearse == "tn":
-        return tn
-    if rehearse == "tree":
-        return tn.contraction_tree(optimize, **kwargs)
-    if rehearse == "info":
-        return tn.contraction_info(optimize, **kwargs)
-
-
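``_handle_rehearse`` standardises what the various ``rehearse`` modes hand back. Assuming the expectation methods above thread ``rehearse`` through to it, a caller might inspect the planned contractions like this (a sketch, reusing ``psi`` and ``terms`` from the earlier examples):

```python
# Sketch only: dry-run the contraction planned for each term.
plans = psi.compute_local_expectation_rehearse(
    terms, max_bond=16, optimize="auto-hq"
)
for where, info in plans.items():
    # with rehearse=True each entry is assumed to hold the TN, its contraction
    # tree, the log2 contraction width "W" and the log10 contraction cost "C"
    print(where, info["W"], info["C"])
```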
(removed in 0.0.2, lines 3177-3237)

-def _compute_expecs_maybe_in_parallel(
-    fn,
-    tn,
-    terms,
-    return_all=False,
-    executor=None,
-    progbar=False,
-    **kwargs,
-):
-    """Unified helper function for the various methods that compute many
-    expectations, possibly in parallel, possibly with a progress bar.
-    """
-    if not isinstance(terms, dict):
-        terms = dict(terms.items())
-
-    if executor is None:
-        results = (fn(tn, G, where, **kwargs) for where, G in terms.items())
-    else:
-        if hasattr(executor, "scatter"):
-            tn = executor.scatter(tn)
-
-        futures = [
-            executor.submit(fn, tn, G, where, **kwargs)
-            for where, G in terms.items()
-        ]
-        results = (future.result() for future in futures)
-
-    if progbar:
-        results = Progbar(results, total=len(terms))
-
-    expecs = dict(zip(terms.keys(), results))
-
-    if return_all or kwargs.get("rehearse", False):
-        return expecs
-
-    return functools.reduce(add, expecs.values())
-
-
-def _tn_local_expectation(tn, *args, **kwargs):
-    """Define as function for pickleability."""
-    return tn.local_expectation(*args, **kwargs)
-
-
-def _tn_local_expectation_cluster(tn, *args, **kwargs):
-    """Define as function for pickleability."""
-    return tn.local_expectation_cluster(*args, **kwargs)
-
-
-def _tn_local_expectation_exact(tn, *args, **kwargs):
-    """Define as function for pickleability."""
-    return tn.local_expectation_exact(*args, **kwargs)
-
-
-def _tn_local_expectation_loop_expansion(tn, *args, **kwargs):
-    """Define as function for pickleability."""
-    return tn.local_expectation_loop_expansion(*args, **kwargs)
-
-
-def _tn_local_expectation_cluster_expansion(tn, *args, **kwargs):
-    """Define as function for pickleability."""
-    return tn.local_expectation_cluster_expansion(*args, **kwargs)
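Finally, the ``executor`` hook and the small pickleable wrappers above exist so that the per-term work can be farmed out to a process pool or a dask-style client (anything exposing ``scatter`` gets the state pre-scattered). A hedged sketch with the standard-library executor, again reusing ``psi`` and ``terms`` from the first example:

```python
# Sketch only: evaluate many terms in parallel via the executor hook.
from concurrent.futures import ProcessPoolExecutor

with ProcessPoolExecutor(max_workers=4) as pool:
    expecs = psi.compute_local_expectation(
        terms,
        max_bond=16,
        optimize="auto-hq",
        executor=pool,
        progbar=True,
        return_all=True,   # dict mapping each term's location to its value
    )
```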