trajectree-0.0.0-py3-none-any.whl → trajectree-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +3 -0
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +3 -3
- trajectree/fock_optics/utils.py +6 -6
- trajectree/quimb/docs/_pygments/_pygments_dark.py +118 -0
- trajectree/quimb/docs/_pygments/_pygments_light.py +118 -0
- trajectree/quimb/docs/conf.py +158 -0
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +62 -0
- trajectree/quimb/quimb/__init__.py +507 -0
- trajectree/quimb/quimb/calc.py +1491 -0
- trajectree/quimb/quimb/core.py +2279 -0
- trajectree/quimb/quimb/evo.py +712 -0
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +129 -0
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +109 -0
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +397 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +653 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +571 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +775 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +537 -0
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +194 -0
- trajectree/quimb/quimb/experimental/cluster_update.py +286 -0
- trajectree/quimb/quimb/experimental/merabuilder.py +865 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +15 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +1631 -0
- trajectree/quimb/quimb/experimental/schematic.py +7 -0
- trajectree/quimb/quimb/experimental/tn_marginals.py +130 -0
- trajectree/quimb/quimb/experimental/tnvmc.py +1483 -0
- trajectree/quimb/quimb/gates.py +36 -0
- trajectree/quimb/quimb/gen/__init__.py +2 -0
- trajectree/quimb/quimb/gen/operators.py +1167 -0
- trajectree/quimb/quimb/gen/rand.py +713 -0
- trajectree/quimb/quimb/gen/states.py +479 -0
- trajectree/quimb/quimb/linalg/__init__.py +6 -0
- trajectree/quimb/quimb/linalg/approx_spectral.py +1109 -0
- trajectree/quimb/quimb/linalg/autoblock.py +258 -0
- trajectree/quimb/quimb/linalg/base_linalg.py +719 -0
- trajectree/quimb/quimb/linalg/mpi_launcher.py +397 -0
- trajectree/quimb/quimb/linalg/numpy_linalg.py +244 -0
- trajectree/quimb/quimb/linalg/rand_linalg.py +514 -0
- trajectree/quimb/quimb/linalg/scipy_linalg.py +293 -0
- trajectree/quimb/quimb/linalg/slepc_linalg.py +892 -0
- trajectree/quimb/quimb/schematic.py +1518 -0
- trajectree/quimb/quimb/tensor/__init__.py +401 -0
- trajectree/quimb/quimb/tensor/array_ops.py +610 -0
- trajectree/quimb/quimb/tensor/circuit.py +4824 -0
- trajectree/quimb/quimb/tensor/circuit_gen.py +411 -0
- trajectree/quimb/quimb/tensor/contraction.py +336 -0
- trajectree/quimb/quimb/tensor/decomp.py +1255 -0
- trajectree/quimb/quimb/tensor/drawing.py +1646 -0
- trajectree/quimb/quimb/tensor/fitting.py +385 -0
- trajectree/quimb/quimb/tensor/geometry.py +583 -0
- trajectree/quimb/quimb/tensor/interface.py +114 -0
- trajectree/quimb/quimb/tensor/networking.py +1058 -0
- trajectree/quimb/quimb/tensor/optimize.py +1818 -0
- trajectree/quimb/quimb/tensor/tensor_1d.py +4778 -0
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +1854 -0
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +662 -0
- trajectree/quimb/quimb/tensor/tensor_2d.py +5954 -0
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +96 -0
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +1230 -0
- trajectree/quimb/quimb/tensor/tensor_3d.py +2869 -0
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +46 -0
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +60 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +3237 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +565 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +1138 -0
- trajectree/quimb/quimb/tensor/tensor_builder.py +5411 -0
- trajectree/quimb/quimb/tensor/tensor_core.py +11179 -0
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +1472 -0
- trajectree/quimb/quimb/tensor/tensor_mera.py +204 -0
- trajectree/quimb/quimb/utils.py +892 -0
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +501 -0
- trajectree/quimb/tests/test_calc.py +788 -0
- trajectree/quimb/tests/test_core.py +847 -0
- trajectree/quimb/tests/test_evo.py +565 -0
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +361 -0
- trajectree/quimb/tests/test_gen/test_rand.py +296 -0
- trajectree/quimb/tests/test_gen/test_states.py +261 -0
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +368 -0
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +351 -0
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +127 -0
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +84 -0
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +134 -0
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +283 -0
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +39 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +67 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +64 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +51 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +142 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +101 -0
- trajectree/quimb/tests/test_tensor/test_circuit.py +816 -0
- trajectree/quimb/tests/test_tensor/test_contract.py +67 -0
- trajectree/quimb/tests/test_tensor/test_decomp.py +40 -0
- trajectree/quimb/tests/test_tensor/test_mera.py +52 -0
- trajectree/quimb/tests/test_tensor/test_optimizers.py +488 -0
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +1171 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +606 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +144 -0
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +123 -0
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +226 -0
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +441 -0
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +2066 -0
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +388 -0
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +63 -0
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +270 -0
- trajectree/quimb/tests/test_utils.py +85 -0
- trajectree/trajectory.py +2 -2
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/METADATA +2 -2
- trajectree-0.0.1.dist-info/RECORD +126 -0
- trajectree-0.0.0.dist-info/RECORD +0 -16
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/WHEEL +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,2066 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
import operator
|
|
3
|
+
import importlib
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
from numpy.testing import assert_allclose
|
|
7
|
+
import scipy.sparse.linalg as spla
|
|
8
|
+
import autoray as ar
|
|
9
|
+
|
|
10
|
+
import quimb as qu
|
|
11
|
+
import quimb.tensor as qtn
|
|
12
|
+
from quimb.tensor import (
|
|
13
|
+
bonds,
|
|
14
|
+
MPS_rand_state,
|
|
15
|
+
oset,
|
|
16
|
+
rand_tensor,
|
|
17
|
+
tensor_contract,
|
|
18
|
+
tensor_direct_product,
|
|
19
|
+
Tensor,
|
|
20
|
+
TensorNetwork,
|
|
21
|
+
TensorNetwork1D,
|
|
22
|
+
TNLinearOperator1D,
|
|
23
|
+
)
|
|
24
|
+
from quimb.tensor.decomp import _compute_number_svals_to_keep_numba
|
|
25
|
+
|
|
26
|
+
requires_autograd = pytest.mark.skipif(
|
|
27
|
+
importlib.util.find_spec("autograd") is None,
|
|
28
|
+
reason="autograd not installed",
|
|
29
|
+
)
|
|
30
|
+
requires_cotengra = pytest.mark.skipif(
|
|
31
|
+
importlib.util.find_spec("cotengra") is None,
|
|
32
|
+
reason="cotengra not installed",
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def test_trim_singular_vals():
|
|
37
|
+
s = np.array([3.0, 2.0, 1.0, 0.1])
|
|
38
|
+
assert _compute_number_svals_to_keep_numba(s, 0.5, 1) == 3
|
|
39
|
+
assert _compute_number_svals_to_keep_numba(s, 0.5, 2) == 2
|
|
40
|
+
assert _compute_number_svals_to_keep_numba(s, 2, 3) == 2
|
|
41
|
+
assert _compute_number_svals_to_keep_numba(s, 5.02, 3) == 1
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class TestBasicTensorOperations:
|
|
45
|
+
def test_tensor_construct(self):
|
|
46
|
+
x = np.random.randn(2, 3, 4)
|
|
47
|
+
a = Tensor(x, inds=[0, 1, 2], tags="blue")
|
|
48
|
+
assert_allclose(a.H.data, x.conj())
|
|
49
|
+
assert a.size == 24
|
|
50
|
+
|
|
51
|
+
with pytest.raises(ValueError):
|
|
52
|
+
Tensor(x, inds=[0, 2], tags="blue")
|
|
53
|
+
|
|
54
|
+
assert repr(a) == (
|
|
55
|
+
"Tensor(shape=(2, 3, 4), " "inds=(0, 1, 2), tags=oset(['blue']))"
|
|
56
|
+
)
|
|
57
|
+
assert str(a) == (
|
|
58
|
+
"Tensor(shape=(2, 3, 4), inds=(0, 1, 2), "
|
|
59
|
+
"tags=oset(['blue']), backend='numpy', "
|
|
60
|
+
"dtype='float64')"
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
def test_tensor_copy(self):
|
|
64
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags="blue")
|
|
65
|
+
b = a.copy()
|
|
66
|
+
b.check()
|
|
67
|
+
b.add_tag("foo")
|
|
68
|
+
assert "foo" not in a.tags
|
|
69
|
+
b.data[:] = b.data / 2
|
|
70
|
+
# still reference the same underlying array
|
|
71
|
+
assert_allclose(a.data, b.data)
|
|
72
|
+
|
|
73
|
+
def test_tensor_deep_copy(self):
|
|
74
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags="blue")
|
|
75
|
+
b = a.copy(deep=True)
|
|
76
|
+
b.add_tag("foo")
|
|
77
|
+
assert "foo" not in a.tags
|
|
78
|
+
b.data[:] = b.data / 2
|
|
79
|
+
# still reference the same underlying array
|
|
80
|
+
assert_allclose(a.data / 2, b.data)
|
|
81
|
+
|
|
82
|
+
def test_with_alpha_construct(self):
|
|
83
|
+
x = np.random.randn(2, 3, 4)
|
|
84
|
+
a = Tensor(x, inds="ijk", tags="blue")
|
|
85
|
+
assert_allclose(a.H.data, x.conj())
|
|
86
|
+
assert a.size == 24
|
|
87
|
+
|
|
88
|
+
with pytest.raises(ValueError):
|
|
89
|
+
Tensor(x, inds="ij", tags="blue")
|
|
90
|
+
|
|
91
|
+
x = np.random.randn(2, 3, 4)
|
|
92
|
+
a = Tensor(x, inds=["a1", "b2", "c3"], tags="blue")
|
|
93
|
+
assert_allclose(a.H.data, x.conj())
|
|
94
|
+
assert a.size == 24
|
|
95
|
+
|
|
96
|
+
with pytest.raises(ValueError):
|
|
97
|
+
Tensor(x, inds=["ijk"], tags="blue")
|
|
98
|
+
|
|
99
|
+
def test_arithmetic_scalar(self):
|
|
100
|
+
x = np.random.randn(2, 3, 4)
|
|
101
|
+
a = Tensor(x, inds=[0, 1, 2], tags="blue")
|
|
102
|
+
assert_allclose((a + 2).data, x + 2)
|
|
103
|
+
assert_allclose((a - 3).data, x - 3)
|
|
104
|
+
assert_allclose((a * 4).data, x * 4)
|
|
105
|
+
assert_allclose((a / 5).data, x / 5)
|
|
106
|
+
assert_allclose((a**2).data, x**2)
|
|
107
|
+
assert_allclose((2 + a).data, 2 + x)
|
|
108
|
+
assert_allclose((3 - a).data, 3 - x)
|
|
109
|
+
assert_allclose((4 * a).data, 4 * x)
|
|
110
|
+
assert_allclose((5 / a).data, 5 / x)
|
|
111
|
+
assert_allclose((5**a).data, 5**x)
|
|
112
|
+
|
|
113
|
+
@pytest.mark.parametrize(
|
|
114
|
+
"op",
|
|
115
|
+
[
|
|
116
|
+
operator.__add__,
|
|
117
|
+
operator.__sub__,
|
|
118
|
+
operator.__mul__,
|
|
119
|
+
operator.__pow__,
|
|
120
|
+
operator.__truediv__,
|
|
121
|
+
],
|
|
122
|
+
)
|
|
123
|
+
@pytest.mark.parametrize("mismatch", (True, False))
|
|
124
|
+
def test_tensor_tensor_arithmetic(self, op, mismatch):
|
|
125
|
+
a = Tensor(np.random.rand(2, 3, 4), inds=[0, 1, 2], tags="blue")
|
|
126
|
+
b = Tensor(np.random.rand(2, 3, 4), inds=[0, 1, 2], tags="red")
|
|
127
|
+
if mismatch:
|
|
128
|
+
b.modify(inds=(0, 1, 3))
|
|
129
|
+
with pytest.raises(ValueError):
|
|
130
|
+
op(a, b)
|
|
131
|
+
else:
|
|
132
|
+
c = op(a, b)
|
|
133
|
+
assert_allclose(c.data, op(a.data, b.data))
|
|
134
|
+
|
|
135
|
+
def test_tensor_conj_inplace(self):
|
|
136
|
+
data = np.random.rand(2, 3, 4) + 1.0j * np.random.rand(2, 3, 4)
|
|
137
|
+
a = Tensor(data, inds=[0, 1, 2], tags="blue")
|
|
138
|
+
a.conj_()
|
|
139
|
+
assert_allclose(data.conj(), a.data)
|
|
140
|
+
|
|
141
|
+
def test_contract_some(self):
|
|
142
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
|
|
143
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3])
|
|
144
|
+
|
|
145
|
+
assert a.shared_bond_size(b) == 12
|
|
146
|
+
|
|
147
|
+
c = a @ b
|
|
148
|
+
c.check()
|
|
149
|
+
|
|
150
|
+
assert isinstance(c, Tensor)
|
|
151
|
+
assert c.shape == (2, 5)
|
|
152
|
+
assert c.inds == (0, 3)
|
|
153
|
+
|
|
154
|
+
def test_contract_all(self):
|
|
155
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
|
|
156
|
+
b = Tensor(np.random.randn(3, 4, 2), inds=[1, 2, 0])
|
|
157
|
+
c = a @ b
|
|
158
|
+
assert isinstance(c, float)
|
|
159
|
+
assert not isinstance(c, Tensor)
|
|
160
|
+
|
|
161
|
+
def test_contract_None(self):
|
|
162
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
|
|
163
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[3, 4, 5])
|
|
164
|
+
c = a @ b
|
|
165
|
+
c.check()
|
|
166
|
+
assert c.shape == (2, 3, 4, 3, 4, 5)
|
|
167
|
+
assert c.inds == (0, 1, 2, 3, 4, 5)
|
|
168
|
+
|
|
169
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
|
|
170
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[5, 4, 3])
|
|
171
|
+
c = a @ b
|
|
172
|
+
|
|
173
|
+
assert c.shape == (2, 3, 4, 3, 4, 5)
|
|
174
|
+
assert c.inds == (0, 1, 2, 5, 4, 3)
|
|
175
|
+
|
|
176
|
+
def test_raise_on_triple_inds(self):
|
|
177
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
|
|
178
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 1, 2])
|
|
179
|
+
with pytest.raises(ValueError):
|
|
180
|
+
a @ b
|
|
181
|
+
|
|
182
|
+
def test_multi_contract(self):
|
|
183
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags="red")
|
|
184
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3], tags="blue")
|
|
185
|
+
c = Tensor(np.random.randn(5, 2, 6), inds=[3, 0, 4], tags="blue")
|
|
186
|
+
d = tensor_contract(a, b, c)
|
|
187
|
+
d.check()
|
|
188
|
+
assert isinstance(d, Tensor)
|
|
189
|
+
assert d.shape == (6,)
|
|
190
|
+
assert d.inds == (4,)
|
|
191
|
+
assert d.tags == oset(("red", "blue"))
|
|
192
|
+
|
|
193
|
+
def test_contract_with_legal_characters(self):
|
|
194
|
+
a = Tensor(np.random.randn(2, 3, 4), inds="abc", tags="red")
|
|
195
|
+
b = Tensor(np.random.randn(3, 4, 5), inds="bcd", tags="blue")
|
|
196
|
+
c = a @ b
|
|
197
|
+
assert c.shape == (2, 5)
|
|
198
|
+
assert c.inds == ("a", "d")
|
|
199
|
+
|
|
200
|
+
def test_contract_with_out_of_range_inds(self):
|
|
201
|
+
a = Tensor(np.random.randn(2, 3, 4), inds=[-1, 100, 2200], tags="red")
|
|
202
|
+
b = Tensor(np.random.randn(3, 4, 5), inds=[100, 2200, -3], tags="blue")
|
|
203
|
+
c = a @ b
|
|
204
|
+
assert c.shape == (2, 5)
|
|
205
|
+
assert c.inds == (-1, -3)
|
|
206
|
+
|
|
207
|
+
def test_contract_with_wild_mix(self):
|
|
208
|
+
a = Tensor(
|
|
209
|
+
np.random.randn(2, 3, 4), inds=["-1", "a", "foo"], tags="red"
|
|
210
|
+
)
|
|
211
|
+
b = Tensor(
|
|
212
|
+
np.random.randn(3, 4, 5), inds=["a", "foo", "42.42"], tags="blue"
|
|
213
|
+
)
|
|
214
|
+
c = a @ b
|
|
215
|
+
assert c.shape == (2, 5)
|
|
216
|
+
assert c.inds == ("-1", "42.42")
|
|
217
|
+
|
|
218
|
+
def test_fuse(self):
|
|
219
|
+
a = Tensor(np.random.rand(2, 3, 4, 5), "abcd", tags={"blue"})
|
|
220
|
+
b = a.fuse({"bra": ["a", "c"], "ket": "bd"})
|
|
221
|
+
assert set(b.shape) == {8, 15}
|
|
222
|
+
assert set(b.inds) == {"bra", "ket"}
|
|
223
|
+
assert b.tags == oset(("blue",))
|
|
224
|
+
|
|
225
|
+
b = a.fuse({"ket": "bd", "bra": "ac"})
|
|
226
|
+
assert set(b.shape) == {15, 8}
|
|
227
|
+
assert set(b.inds) == {"ket", "bra"}
|
|
228
|
+
assert b.tags == oset(("blue",))
|
|
229
|
+
|
|
230
|
+
def test_unfuse(self):
|
|
231
|
+
a = Tensor(np.random.rand(2, 3, 4, 5), "abcd", tags={"blue"})
|
|
232
|
+
b = a.fuse({"bra": ["a", "c"], "ket": "bd"})
|
|
233
|
+
|
|
234
|
+
c = b.unfuse(
|
|
235
|
+
{"bra": ["a", "c"], "ket": "bd"}, {"bra": [2, 4], "ket": [3, 5]}
|
|
236
|
+
)
|
|
237
|
+
assert set(c.shape) == {2, 3, 4, 5}
|
|
238
|
+
assert set(c.inds) == {"a", "b", "c", "d"}
|
|
239
|
+
assert c.left_inds == b.left_inds
|
|
240
|
+
assert np.allclose(c.data.reshape(8, 15), b.data)
|
|
241
|
+
|
|
242
|
+
b.modify(left_inds=["ket"])
|
|
243
|
+
c = b.unfuse({"ket": "bd"}, {"ket": [5, 3]})
|
|
244
|
+
assert set(c.shape) == {3, 5, 8}
|
|
245
|
+
assert set(c.inds) == {"b", "d", "bra"}
|
|
246
|
+
assert set(c.tags) == {"blue"}
|
|
247
|
+
assert set(c.left_inds) == {"b", "d"}
|
|
248
|
+
|
|
249
|
+
b.modify(left_inds=["bra"])
|
|
250
|
+
c = b.unfuse({"ket": "bd"}, {"ket": [5, 3]})
|
|
251
|
+
assert set(c.left_inds) == {"bra"}
|
|
252
|
+
|
|
253
|
+
def test_fuse_leftover(self):
|
|
254
|
+
a = Tensor(np.random.rand(2, 3, 4, 5, 2, 2), "abcdef", tags={"blue"})
|
|
255
|
+
b = a.fuse({"bra": "ac", "ket": "bd"})
|
|
256
|
+
assert b.shape == (8, 15, 2, 2)
|
|
257
|
+
assert b.inds == ("bra", "ket", "e", "f")
|
|
258
|
+
assert b.tags == oset(("blue",))
|
|
259
|
+
|
|
260
|
+
def test_tensor_transpose(self):
|
|
261
|
+
a = Tensor(np.random.rand(2, 3, 4, 5, 2, 2), "abcdef", tags={"blue"})
|
|
262
|
+
at = a.transpose(*"cdfeba")
|
|
263
|
+
assert at.shape == (4, 5, 2, 2, 3, 2)
|
|
264
|
+
assert at.inds == ("c", "d", "f", "e", "b", "a")
|
|
265
|
+
|
|
266
|
+
with pytest.raises(ValueError):
|
|
267
|
+
a.transpose(*"cdfebz")
|
|
268
|
+
|
|
269
|
+
def test_tensor_moveindex(self):
|
|
270
|
+
A = rand_tensor([3, 4, 5], ["a", "b", "c"])
|
|
271
|
+
B = rand_tensor([3, 4, 5], ["a", "b", "c"])
|
|
272
|
+
x = A @ B
|
|
273
|
+
A.moveindex_("b", 0)
|
|
274
|
+
B.moveindex_("b", -1)
|
|
275
|
+
assert A.inds[0] == "b" and B.inds[2] == "b"
|
|
276
|
+
assert A @ B == pytest.approx(x)
|
|
277
|
+
|
|
278
|
+
def test_tensor_trace(self):
|
|
279
|
+
t = qtn.rand_tensor((3, 3, 3), "abc", dtype="complex128")
|
|
280
|
+
tb = t.trace("a", "c")
|
|
281
|
+
assert tb.inds == ("b",)
|
|
282
|
+
assert_allclose(tb.data, np.trace(t.data, axis1=0, axis2=2))
|
|
283
|
+
tc = t.trace("a", "b")
|
|
284
|
+
assert tc.inds == ("c",)
|
|
285
|
+
assert_allclose(tc.data, np.trace(t.data, axis1=0, axis2=1))
|
|
286
|
+
with pytest.raises(ValueError):
|
|
287
|
+
t.trace("a", "z")
|
|
288
|
+
assert not isinstance(
|
|
289
|
+
qtn.rand_tensor([2, 2], "ab").trace("a", "b"), qtn.Tensor
|
|
290
|
+
)
|
|
291
|
+
assert isinstance(
|
|
292
|
+
qtn.rand_tensor([2, 2], "ab").trace(
|
|
293
|
+
"a", "b", preserve_tensor=True
|
|
294
|
+
),
|
|
295
|
+
qtn.Tensor,
|
|
296
|
+
)
|
|
297
|
+
|
|
298
|
+
def test_tensor_trace_multi(self):
|
|
299
|
+
t = qtn.rand_tensor((3, 3, 3, 3, 3), "abcde", dtype="complex128")
|
|
300
|
+
t1 = t.trace(["a", "c"], ["e", "b"])
|
|
301
|
+
te = t.trace("a", "e").trace("c", "b")
|
|
302
|
+
assert t1.almost_equals(te)
|
|
303
|
+
with pytest.raises(ValueError):
|
|
304
|
+
t.trace(["a", "b", "c"], ["d", "e"])
|
|
305
|
+
|
|
306
|
+
def test_sum_reduce(self):
|
|
307
|
+
t = rand_tensor((2, 3, 4), "abc")
|
|
308
|
+
ta = t.sum_reduce("a")
|
|
309
|
+
assert ta.inds == ("b", "c")
|
|
310
|
+
assert ta.ndim == 2
|
|
311
|
+
assert_allclose(ta.data, t.data.sum(axis=0))
|
|
312
|
+
tb = t.sum_reduce("b")
|
|
313
|
+
assert tb.ndim == 2
|
|
314
|
+
assert_allclose(tb.data, t.data.sum(axis=1))
|
|
315
|
+
tc = t.sum_reduce("c")
|
|
316
|
+
assert tc.ndim == 2
|
|
317
|
+
assert_allclose(tc.data, t.data.sum(axis=2))
|
|
318
|
+
with pytest.raises(ValueError):
|
|
319
|
+
t.sum_reduce_("d")
|
|
320
|
+
|
|
321
|
+
def test_vector_reduce(self):
|
|
322
|
+
t = rand_tensor((2, 3, 4), "abc")
|
|
323
|
+
g = qu.randn(3)
|
|
324
|
+
tv = t.vector_reduce("b", g)
|
|
325
|
+
assert tv.shape == (2, 4)
|
|
326
|
+
assert tv.inds == ("a", "c")
|
|
327
|
+
assert_allclose(tv.data, np.einsum("abc,b->ac", t.data, g))
|
|
328
|
+
|
|
329
|
+
def test_ownership(self):
|
|
330
|
+
a = rand_tensor((2, 2), ("a", "b"), tags={"X", "Y"})
|
|
331
|
+
b = rand_tensor((2, 2), ("b", "c"), tags={"X", "Z"})
|
|
332
|
+
assert not a.check_owners()
|
|
333
|
+
assert not b.check_owners()
|
|
334
|
+
tn = TensorNetwork((a, b), virtual=True)
|
|
335
|
+
assert a.check_owners()
|
|
336
|
+
assert b.check_owners()
|
|
337
|
+
assert a.owners[hash(tn)][0]() is tn
|
|
338
|
+
assert b.owners[hash(tn)][0]() is tn
|
|
339
|
+
assert all(map(tn.ind_map.__contains__, ("a", "b", "c")))
|
|
340
|
+
assert all(map(tn.tag_map.__contains__, ("X", "Y", "Z")))
|
|
341
|
+
a.reindex_({"a": "d"})
|
|
342
|
+
assert "a" not in tn.ind_map
|
|
343
|
+
assert "d" in tn.ind_map
|
|
344
|
+
assert len(tn.tag_map["X"]) == 2
|
|
345
|
+
b.retag_({"X": "W"})
|
|
346
|
+
assert len(tn.tag_map["X"]) == 1
|
|
347
|
+
assert "W" in tn.tag_map
|
|
348
|
+
del tn
|
|
349
|
+
assert not a.check_owners()
|
|
350
|
+
assert not b.check_owners()
|
|
351
|
+
|
|
352
|
+
def test_isel(self):
|
|
353
|
+
T = rand_tensor((2, 3, 4, 5, 6), inds=["a", "b", "c", "d", "e"])
|
|
354
|
+
Tis = T.isel({"d": 2, "b": 0})
|
|
355
|
+
assert Tis.shape == (2, 4, 6)
|
|
356
|
+
assert Tis.inds == ("a", "c", "e")
|
|
357
|
+
assert_allclose(Tis.data, T.data[:, 0, :, 2, :])
|
|
358
|
+
|
|
359
|
+
def test_cut_iter(self):
|
|
360
|
+
psi = MPS_rand_state(10, 7, cyclic=True)
|
|
361
|
+
pp = psi.H & psi
|
|
362
|
+
bnds = bonds(pp[0], pp[-1])
|
|
363
|
+
assert sum(tn ^ all for tn in pp.cut_iter(*bnds)) == pytest.approx(1.0)
|
|
364
|
+
assert pp ^ all == pytest.approx(1.0)
|
|
365
|
+
|
|
366
|
+
@pytest.mark.parametrize(
|
|
367
|
+
"method", ["qr", "svd", "exp", "cayley", "mgs", "svd"]
|
|
368
|
+
)
|
|
369
|
+
def test_isometrize(self, method):
|
|
370
|
+
t = rand_tensor((2, 3, 4), "abc")
|
|
371
|
+
assert t.H @ t != pytest.approx(3.0)
|
|
372
|
+
t.isometrize_("b", method=method)
|
|
373
|
+
assert t.H @ t == pytest.approx(3.0)
|
|
374
|
+
assert t.inds == ("b", "a", "c")
|
|
375
|
+
|
|
376
|
+
def test_connect(self):
|
|
377
|
+
x = rand_tensor((2, 3), "ab")
|
|
378
|
+
y = rand_tensor((3, 2), "cd")
|
|
379
|
+
|
|
380
|
+
with pytest.raises(ValueError):
|
|
381
|
+
qtn.connect(x, y, 0, 0)
|
|
382
|
+
|
|
383
|
+
tn = x | y
|
|
384
|
+
assert len(tn.outer_inds()) == 4
|
|
385
|
+
qtn.connect(x, y, 0, 1)
|
|
386
|
+
assert len(tn.outer_inds()) == 2
|
|
387
|
+
qtn.connect(x, y, 1, 0)
|
|
388
|
+
assert len(tn.outer_inds()) == 0
|
|
389
|
+
assert tn.contract(all, preserve_tensor=True).shape == ()
|
|
390
|
+
# make sure bond is newly labelled
|
|
391
|
+
assert set("abcd") & set(tn.all_inds()) == set()
|
|
392
|
+
|
|
393
|
+
def test_group_inds(self):
|
|
394
|
+
x = rand_tensor((2, 2, 2, 2), "abcd")
|
|
395
|
+
y = rand_tensor((2, 2, 2), "bdf")
|
|
396
|
+
lix, six, rix = qtn.group_inds(x, y)
|
|
397
|
+
assert lix == ["a", "c"]
|
|
398
|
+
assert six == ["b", "d"]
|
|
399
|
+
assert rix == ["f"]
|
|
400
|
+
|
|
401
|
+
def test_group_inds_tensor_network(self):
|
|
402
|
+
tn = qtn.TN2D_with_value(1.0, 4, 4, 2)
|
|
403
|
+
ltn = tn.select(["I1,1", "I1,2"], "any")
|
|
404
|
+
rtn = tn.select(["I2,1", "I2,2"], "any")
|
|
405
|
+
lix, six, rix = qtn.group_inds(ltn, rtn)
|
|
406
|
+
assert len(lix) == len(rix) == 4
|
|
407
|
+
assert len(six) == 2
|
|
408
|
+
assert ltn.inds_size(six) == 4
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
class TestTensorFunctions:
|
|
412
|
+
@pytest.mark.parametrize("method", ["svd", "eig", "isvd", "svds"])
|
|
413
|
+
@pytest.mark.parametrize("linds", [("a", "b", "d"), ("c", "e")])
|
|
414
|
+
@pytest.mark.parametrize("cutoff", [-1.0, 1e-13, 1e-10])
|
|
415
|
+
@pytest.mark.parametrize("cutoff_mode", ["abs", "rel", "sum2"])
|
|
416
|
+
@pytest.mark.parametrize("absorb", ["left", "both", "right"])
|
|
417
|
+
def test_split_tensor_rank_revealing(
|
|
418
|
+
self, method, linds, cutoff, cutoff_mode, absorb
|
|
419
|
+
):
|
|
420
|
+
a = rand_tensor((2, 3, 4, 5, 6), inds="abcde", tags="red")
|
|
421
|
+
a_split = a.split(
|
|
422
|
+
linds,
|
|
423
|
+
method=method,
|
|
424
|
+
cutoff=cutoff,
|
|
425
|
+
cutoff_mode=cutoff_mode,
|
|
426
|
+
absorb=absorb,
|
|
427
|
+
)
|
|
428
|
+
assert len(a_split.tensors) == 2
|
|
429
|
+
if linds == "abd":
|
|
430
|
+
assert (a_split.shape == (2, 3, 5, 4, 6)) or (
|
|
431
|
+
a_split.shape == (4, 6, 2, 3, 5)
|
|
432
|
+
)
|
|
433
|
+
elif linds == "edc":
|
|
434
|
+
assert (a_split.shape == (6, 5, 4, 2, 3)) or (
|
|
435
|
+
a_split.shape == (2, 3, 6, 5, 4)
|
|
436
|
+
)
|
|
437
|
+
assert (a_split ^ ...).almost_equals(a)
|
|
438
|
+
|
|
439
|
+
@pytest.mark.parametrize("method", ["qr", "lq"])
|
|
440
|
+
@pytest.mark.parametrize("linds", [("a", "b", "d"), ("c", "e")])
|
|
441
|
+
def test_split_tensor_rank_hidden(self, method, linds):
|
|
442
|
+
a = rand_tensor((2, 3, 4, 5, 6), inds="abcde", tags="red")
|
|
443
|
+
a_split = a.split(linds, method=method)
|
|
444
|
+
assert len(a_split.tensors) == 2
|
|
445
|
+
if linds == "abd":
|
|
446
|
+
assert (a_split.shape == (2, 3, 5, 4, 6)) or (
|
|
447
|
+
a_split.shape == (4, 6, 2, 3, 5)
|
|
448
|
+
)
|
|
449
|
+
elif linds == "edc":
|
|
450
|
+
assert (a_split.shape == (6, 5, 4, 2, 3)) or (
|
|
451
|
+
a_split.shape == (2, 3, 6, 5, 4)
|
|
452
|
+
)
|
|
453
|
+
assert (a_split ^ ...).almost_equals(a)
|
|
454
|
+
|
|
455
|
+
@pytest.mark.parametrize("method", ["svd", "eig"])
|
|
456
|
+
def test_singular_values(self, method):
|
|
457
|
+
psim = Tensor(np.eye(2) * 2**-0.5, inds="ab")
|
|
458
|
+
assert_allclose(psim.H @ psim, 1.0)
|
|
459
|
+
assert_allclose(
|
|
460
|
+
psim.singular_values("a", method=method) ** 2, [0.5, 0.5]
|
|
461
|
+
)
|
|
462
|
+
|
|
463
|
+
def test_split_renorm(self):
|
|
464
|
+
t = rand_tensor((3, 3, 3, 3), ["a", "b", "c", "d"])
|
|
465
|
+
n_nuc = t.singular_values(["a", "b"]).sum()
|
|
466
|
+
n_fro = (t.singular_values(["a", "b"]) ** 2).sum() ** 0.5
|
|
467
|
+
|
|
468
|
+
tc = t.split(["a", "b"], cutoff=0.0, max_bond=5, renorm=1) ^ all
|
|
469
|
+
nc_nuc = tc.singular_values(["a", "b"]).sum()
|
|
470
|
+
nc_fro = (tc.singular_values(["a", "b"]) ** 2).sum() ** 0.5
|
|
471
|
+
assert nc_nuc == pytest.approx(n_nuc)
|
|
472
|
+
assert nc_fro != pytest.approx(n_fro)
|
|
473
|
+
|
|
474
|
+
tc = t.split(["a", "b"], cutoff=0.0, max_bond=5, renorm=2) ^ all
|
|
475
|
+
nc_nuc = tc.singular_values(["a", "b"]).sum()
|
|
476
|
+
nc_fro = (tc.singular_values(["a", "b"]) ** 2).sum() ** 0.5
|
|
477
|
+
assert nc_fro == pytest.approx(n_fro)
|
|
478
|
+
assert nc_nuc != pytest.approx(n_nuc)
|
|
479
|
+
|
|
480
|
+
def test_absorb_none(self):
|
|
481
|
+
x = qtn.rand_tensor((4, 5, 6, 7), inds="abcd", tags="X", seed=42)
|
|
482
|
+
e = x.H @ x
|
|
483
|
+
|
|
484
|
+
with pytest.raises(ValueError):
|
|
485
|
+
x.split(["a", "c"], absorb=None, method="qr")
|
|
486
|
+
|
|
487
|
+
xs_tn = x.split(["a", "c"], absorb=None, stags="S")
|
|
488
|
+
assert isinstance(xs_tn, TensorNetwork)
|
|
489
|
+
assert xs_tn.num_tensors == 3
|
|
490
|
+
e1 = (xs_tn.H & xs_tn).contract(all, output_inds=())
|
|
491
|
+
assert e1 == pytest.approx(e)
|
|
492
|
+
assert "S" in xs_tn.tags
|
|
493
|
+
|
|
494
|
+
Tl, Ts, Tr = x.split(["a", "c"], absorb=None, get="tensors")
|
|
495
|
+
assert isinstance(Ts, Tensor)
|
|
496
|
+
assert len(Ts.inds) == 1
|
|
497
|
+
assert "X" in Ts.tags
|
|
498
|
+
Tl.multiply_index_diagonal_(Ts.inds[0], Ts.data)
|
|
499
|
+
xs_tn = Tl & Tr
|
|
500
|
+
e2 = (xs_tn.H & xs_tn).contract(all)
|
|
501
|
+
assert e2 == pytest.approx(e)
|
|
502
|
+
|
|
503
|
+
l, s, r = x.split(["a", "c"], absorb=None, get="arrays")
|
|
504
|
+
assert s.size == 24
|
|
505
|
+
y_data = np.einsum("acx,x,xbd->abcd", l, s, r)
|
|
506
|
+
assert_allclose(y_data, x.data)
|
|
507
|
+
|
|
508
|
+
l, s, r = x.split(["a", "c"], absorb=None, get="arrays", max_bond=20)
|
|
509
|
+
assert s.size == 20
|
|
510
|
+
y_data = np.einsum("acx,x,xbd->abcd", l, s, r)
|
|
511
|
+
assert qu.norm(y_data, "fro") == pytest.approx(
|
|
512
|
+
qu.norm(x.data, "fro"), rel=0.1
|
|
513
|
+
)
|
|
514
|
+
|
|
515
|
+
@pytest.mark.parametrize("method", ["svd", "eig"])
|
|
516
|
+
def test_renorm(self, method):
|
|
517
|
+
U = qu.rand_uni(10)
|
|
518
|
+
s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
|
|
519
|
+
x = np.einsum("ab,b,bc->ac", U, s, qu.dag(U))
|
|
520
|
+
|
|
521
|
+
t = qtn.Tensor(x, inds="ab")
|
|
522
|
+
fn2 = t.norm() ** 2
|
|
523
|
+
trc = np.einsum("aa", t.data).real
|
|
524
|
+
|
|
525
|
+
assert fn2 == pytest.approx(385.0)
|
|
526
|
+
assert trc == pytest.approx(55.0)
|
|
527
|
+
|
|
528
|
+
tn2 = t.split(
|
|
529
|
+
"a", method="svd", cutoff=0.1, renorm=True, cutoff_mode="rsum2"
|
|
530
|
+
)
|
|
531
|
+
a_fn2 = tn2.H @ tn2
|
|
532
|
+
assert qtn.bonds_size(*tn2) == 6
|
|
533
|
+
assert a_fn2 == pytest.approx(fn2)
|
|
534
|
+
|
|
535
|
+
tn2 = t.split(
|
|
536
|
+
"a", method="svd", cutoff=40, renorm=True, cutoff_mode="sum2"
|
|
537
|
+
)
|
|
538
|
+
a_fn2 = tn2.H @ tn2
|
|
539
|
+
assert qtn.bonds_size(*tn2) == 6
|
|
540
|
+
assert a_fn2 == pytest.approx(fn2)
|
|
541
|
+
|
|
542
|
+
tn1 = t.split(
|
|
543
|
+
"a", method="svd", cutoff=0.2, renorm=True, cutoff_mode="rsum1"
|
|
544
|
+
)
|
|
545
|
+
a_trc = tn1.trace("a", "b").real
|
|
546
|
+
assert qtn.bonds_size(*tn1) == 6
|
|
547
|
+
assert a_trc == pytest.approx(trc)
|
|
548
|
+
|
|
549
|
+
tn1 = t.split(
|
|
550
|
+
"a", method="svd", cutoff=11, renorm=True, cutoff_mode="sum1"
|
|
551
|
+
)
|
|
552
|
+
a_trc = tn1.trace("a", "b").real
|
|
553
|
+
assert qtn.bonds_size(*tn1) == 6
|
|
554
|
+
assert a_trc == pytest.approx(trc)
|
|
555
|
+
|
|
556
|
+
@pytest.mark.parametrize("method", ["svd", "eig"])
|
|
557
|
+
def test_entropy(self, method):
|
|
558
|
+
psim = Tensor(np.eye(2) * 2**-0.5, inds="ab")
|
|
559
|
+
assert_allclose(psim.H @ psim, 1.0)
|
|
560
|
+
assert_allclose(psim.entropy("a", method=method) ** 2, 1)
|
|
561
|
+
|
|
562
|
+
@pytest.mark.parametrize("method", ["svd", "eig"])
|
|
563
|
+
def test_entropy_matches_dense(self, method):
|
|
564
|
+
p = MPS_rand_state(5, 32)
|
|
565
|
+
p_dense = p.to_qarray()
|
|
566
|
+
real_svn = qu.entropy(p_dense.ptr([2] * 5, [0, 1, 2]))
|
|
567
|
+
|
|
568
|
+
svn = (p ^ ...).entropy(("k0", "k1", "k2"))
|
|
569
|
+
assert_allclose(real_svn, svn)
|
|
570
|
+
|
|
571
|
+
# use tensor to left of bipartition
|
|
572
|
+
p.canonicalize_(2)
|
|
573
|
+
t1 = p["I2"]
|
|
574
|
+
left_inds = set(t1.inds) - set(p["I3"].inds)
|
|
575
|
+
svn = (t1).entropy(left_inds, method=method)
|
|
576
|
+
assert_allclose(real_svn, svn)
|
|
577
|
+
|
|
578
|
+
# use tensor to right of bipartition
|
|
579
|
+
p.canonicalize_(3)
|
|
580
|
+
t2 = p["I3"]
|
|
581
|
+
left_inds = set(t2.inds) & set(p["I2"].inds)
|
|
582
|
+
svn = (t2).entropy(left_inds, method=method)
|
|
583
|
+
assert_allclose(real_svn, svn)
|
|
584
|
+
|
|
585
|
+
def test_direct_product(self):
|
|
586
|
+
a1 = rand_tensor((2, 3, 4), inds="abc")
|
|
587
|
+
b1 = rand_tensor((3, 4, 5), inds="bcd")
|
|
588
|
+
a2 = rand_tensor((2, 3, 4), inds="abc")
|
|
589
|
+
b2 = rand_tensor((3, 4, 5), inds="bcd")
|
|
590
|
+
|
|
591
|
+
c1 = (a1 @ b1) + (a2 @ b2)
|
|
592
|
+
c2 = tensor_direct_product(
|
|
593
|
+
a1, a2, sum_inds=("a")
|
|
594
|
+
) @ tensor_direct_product(b1, b2, sum_inds=("d"))
|
|
595
|
+
assert c1.almost_equals(c2)
|
|
596
|
+
|
|
597
|
+
def test_direct_product_triple(self):
|
|
598
|
+
a1 = rand_tensor((2, 3, 4), inds="abc")
|
|
599
|
+
b1 = rand_tensor((3, 4, 5, 6), inds="bcde")
|
|
600
|
+
c1 = rand_tensor((6, 7), inds="ef")
|
|
601
|
+
|
|
602
|
+
a2 = rand_tensor((2, 3, 4), inds="abc")
|
|
603
|
+
b2 = rand_tensor((3, 4, 5, 6), inds="bcde").transpose(*"decb")
|
|
604
|
+
c2 = rand_tensor((6, 7), inds="ef")
|
|
605
|
+
|
|
606
|
+
d1 = (a1 @ b1 @ c1) + (a2 @ b2 @ c2)
|
|
607
|
+
d2 = (
|
|
608
|
+
tensor_direct_product(a1, a2, sum_inds=("a"))
|
|
609
|
+
@ tensor_direct_product(b1, b2, sum_inds=("d"))
|
|
610
|
+
@ tensor_direct_product(c1, c2, sum_inds=("f"))
|
|
611
|
+
)
|
|
612
|
+
assert d1.almost_equals(d2)
|
|
613
|
+
|
|
614
|
+
@pytest.mark.parametrize(
|
|
615
|
+
"dtype",
|
|
616
|
+
[float, complex, np.complex128, np.float64, np.float32, "raise"],
|
|
617
|
+
)
|
|
618
|
+
def test_rand_tensor(self, dtype):
|
|
619
|
+
if dtype == "raise":
|
|
620
|
+
with pytest.raises(TypeError):
|
|
621
|
+
rand_tensor((2, 3, 4), "abc", dtype=dtype)
|
|
622
|
+
else:
|
|
623
|
+
t = rand_tensor((2, 3, 4), "abc", dtype=dtype)
|
|
624
|
+
assert t.dtype == np.dtype(dtype)
|
|
625
|
+
|
|
626
|
+
tn = t & t
|
|
627
|
+
assert tn.dtype == np.dtype(dtype)
|
|
628
|
+
|
|
629
|
+
def test_squeeze(self):
|
|
630
|
+
a = rand_tensor((1, 2, 3, 1, 4), inds="abcde", tags=["hello"])
|
|
631
|
+
b = a.squeeze()
|
|
632
|
+
assert b.shape == (2, 3, 4)
|
|
633
|
+
assert b.inds == ("b", "c", "e")
|
|
634
|
+
assert "hello" in b.tags
|
|
635
|
+
assert a.shape == (1, 2, 3, 1, 4)
|
|
636
|
+
c = a.squeeze(include=["d"])
|
|
637
|
+
assert c.shape == (1, 2, 3, 4)
|
|
638
|
+
assert c.inds == ("a", "b", "c", "e")
|
|
639
|
+
d = a.squeeze(exclude=["d"])
|
|
640
|
+
assert d.shape == (2, 3, 1, 4)
|
|
641
|
+
assert d.inds == ("b", "c", "d", "e")
|
|
642
|
+
|
|
643
|
+
def test_tensor_fuse_squeeze(self):
|
|
644
|
+
a = rand_tensor((1, 2, 3), inds="abc")
|
|
645
|
+
b = rand_tensor((2, 3, 4), inds="bcd")
|
|
646
|
+
qtn.tensor_fuse_squeeze(a, b)
|
|
647
|
+
assert a.inds == ("a", "b")
|
|
648
|
+
assert a.shape == (1, 6)
|
|
649
|
+
assert b.inds == ("b", "d")
|
|
650
|
+
assert b.shape == (6, 4)
|
|
651
|
+
|
|
652
|
+
a = rand_tensor((1, 1, 1), inds="abc")
|
|
653
|
+
b = rand_tensor((1, 1, 1), inds="bcd")
|
|
654
|
+
qtn.tensor_fuse_squeeze(a, b)
|
|
655
|
+
assert a.inds == ("a",)
|
|
656
|
+
assert a.shape == (1,)
|
|
657
|
+
assert b.inds == ("d",)
|
|
658
|
+
assert b.shape == (1,)
|
|
659
|
+
|
|
660
|
+
@pytest.mark.parametrize("dtype", [None, "complex128", "float32"])
|
|
661
|
+
def test_randomize(self, dtype):
|
|
662
|
+
a = rand_tensor((2, 3, 4), ["a", "b", "c"], dtype="float64")
|
|
663
|
+
if dtype is not None:
|
|
664
|
+
assert a.dtype != dtype
|
|
665
|
+
x1 = a.norm()
|
|
666
|
+
a.randomize_(dtype=dtype)
|
|
667
|
+
x2 = a.norm()
|
|
668
|
+
assert x1 != pytest.approx(x2)
|
|
669
|
+
assert a.shape == (2, 3, 4)
|
|
670
|
+
if dtype is not None:
|
|
671
|
+
assert a.dtype == dtype
|
|
672
|
+
else:
|
|
673
|
+
assert a.dtype == "float64"
|
|
674
|
+
|
|
675
|
+
def test_multiply_index_diagonal(self):
|
|
676
|
+
x = rand_tensor((3, 4), "ab")
|
|
677
|
+
y = rand_tensor((4, 5), "bc")
|
|
678
|
+
z1 = x @ y
|
|
679
|
+
# insert a diagonal gauge
|
|
680
|
+
s = qu.randn(4)
|
|
681
|
+
z2 = x.multiply_index_diagonal("b", s) @ y.multiply_index_diagonal(
|
|
682
|
+
"b", 1 / s
|
|
683
|
+
)
|
|
684
|
+
assert z1.almost_equals(z2)
|
|
685
|
+
|
|
686
|
+
@pytest.mark.parametrize("smudge", [1e-6, 1e-12])
|
|
687
|
+
def test_balance_bonds(self, smudge):
|
|
688
|
+
t1 = rand_tensor((3, 4), "ab")
|
|
689
|
+
t2 = rand_tensor((4, 5), "bc")
|
|
690
|
+
col_nrm_x1 = tensor_contract(t1.H, t1, output_inds="b").data
|
|
691
|
+
col_nrm_y1 = tensor_contract(t2.H, t2, output_inds="b").data
|
|
692
|
+
assert not np.allclose(col_nrm_x1, col_nrm_y1, rtol=1e-6)
|
|
693
|
+
z1 = (t1 @ t2).data
|
|
694
|
+
qtn.tensor_balance_bond(t1, t2, smudge=smudge)
|
|
695
|
+
col_nrm_x2 = tensor_contract(t1.H, t1, output_inds="b").data
|
|
696
|
+
col_nrm_y2 = tensor_contract(t2.H, t2, output_inds="b").data
|
|
697
|
+
assert_allclose(col_nrm_x2, col_nrm_y2, rtol=10 * smudge)
|
|
698
|
+
z2 = (t1 @ t2).data
|
|
699
|
+
assert_allclose(z1, z2)
|
|
700
|
+
|
|
701
|
+
def test_new_ind_with_identity(self):
|
|
702
|
+
t = rand_tensor((2, 2, 3, 3), "abcd")
|
|
703
|
+
t.new_ind_with_identity("switch", ["a", "c"], ["b", "d"], axis=2)
|
|
704
|
+
assert t.inds == ("a", "b", "switch", "c", "d")
|
|
705
|
+
assert t.isel({"switch": 1}).data.sum() == pytest.approx(6)
|
|
706
|
+
|
|
707
|
+
def test_idxmin(self):
|
|
708
|
+
data = np.arange(24).reshape(2, 3, 4)
|
|
709
|
+
t = Tensor(data, inds=["a", "b", "c"])
|
|
710
|
+
assert t.idxmax() == {"a": 1, "b": 2, "c": 3}
|
|
711
|
+
assert t.idxmin() == {"a": 0, "b": 0, "c": 0}
|
|
712
|
+
data = np.arange(24).reshape(2, 3, 4) - 11.5
|
|
713
|
+
t = Tensor(data, inds=["a", "b", "c"])
|
|
714
|
+
assert t.idxmin("abs") == {"a": 0, "b": 2, "c": 3}
|
|
715
|
+
assert t.idxmax(lambda x: 1 / x) == {"a": 1, "b": 0, "c": 0}
|
|
716
|
+
|
|
717
|
+
def test_expand_ind(self):
|
|
718
|
+
t = Tensor(np.ones((2, 3, 4)), inds=["a", "b", "c"])
|
|
719
|
+
assert t.data.sum() == pytest.approx(24)
|
|
720
|
+
|
|
721
|
+
# test zeros mode
|
|
722
|
+
t0 = t.copy()
|
|
723
|
+
t0.expand_ind("a", size=6, mode="zeros")
|
|
724
|
+
assert t0.data.sum() == pytest.approx(24)
|
|
725
|
+
|
|
726
|
+
# test tiling mode
|
|
727
|
+
tt = t.copy()
|
|
728
|
+
tt.expand_ind("a", size=6, mode="repeat")
|
|
729
|
+
assert tt.data.sum() == pytest.approx(72)
|
|
730
|
+
|
|
731
|
+
# test random mode
|
|
732
|
+
tr = t.copy()
|
|
733
|
+
tr.expand_ind("a", size=6, mode="random", rand_dist="uniform")
|
|
734
|
+
assert 24 <= tr.data.sum() <= 72
|
|
735
|
+
|
|
736
|
+
tr = t.copy()
|
|
737
|
+
tr.expand_ind("a", size=6, rand_strength=1.0, rand_dist="uniform")
|
|
738
|
+
assert 24 <= tr.data.sum() <= 72
|
|
739
|
+
|
|
740
|
+
tr = t.copy()
|
|
741
|
+
tr.expand_ind("a", size=6, rand_strength=-1.0, rand_dist="uniform")
|
|
742
|
+
assert -48 <= tr.data.sum() <= 24
|
|
743
|
+
|
|
744
|
+
|
|
745
|
+
class TestTensorNetwork:
|
|
746
|
+
def test_combining_tensors(self):
|
|
747
|
+
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags="red")
|
|
748
|
+
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags="blue")
|
|
749
|
+
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags="blue")
|
|
750
|
+
|
|
751
|
+
with pytest.raises(TypeError):
|
|
752
|
+
a & np.array([0, 0])
|
|
753
|
+
|
|
754
|
+
abc1 = (a & b & c).H.contract()
|
|
755
|
+
abc2 = (a & (b & c)).H.contract()
|
|
756
|
+
abc3 = (TensorNetwork([a, b, c])).H.contract()
|
|
757
|
+
abc4 = (TensorNetwork([a, TensorNetwork([b, c])])).H.contract()
|
|
758
|
+
abc5 = (TensorNetwork([a]) & TensorNetwork([b, c])).H.contract()
|
|
759
|
+
|
|
760
|
+
assert_allclose(abc1.data, abc2.data)
|
|
761
|
+
assert_allclose(abc1.data, abc3.data)
|
|
762
|
+
assert_allclose(abc1.data, abc4.data)
|
|
763
|
+
assert_allclose(abc1.data, abc5.data)
|
|
764
|
+
|
|
765
|
+
def test_copy(self):
|
|
766
|
+
a = rand_tensor((2, 3, 4), inds="abc", tags="t0")
|
|
767
|
+
b = rand_tensor((2, 3, 4), inds="abd", tags="t1")
|
|
768
|
+
tn1 = TensorNetwork((a, b))
|
|
769
|
+
tn2 = tn1.copy()
|
|
770
|
+
# check can modify tensor structure
|
|
771
|
+
tn2["t1"].modify(inds=("a", "b", "X"))
|
|
772
|
+
assert tn1["t1"] is not tn2["t1"]
|
|
773
|
+
assert tn2["t1"].inds == ("a", "b", "X")
|
|
774
|
+
assert tn1["t1"].inds == ("a", "b", "d")
|
|
775
|
+
# but that data remains the same
|
|
776
|
+
assert tn1["t1"].data is tn2["t1"].data
|
|
777
|
+
tn2["t1"].data[:] /= 2
|
|
778
|
+
assert_allclose(tn1["t1"].data, tn2["t1"].data)
|
|
779
|
+
|
|
780
|
+
def test_copy_deep(self):
|
|
781
|
+
a = rand_tensor((2, 3, 4), inds="abc", tags="t0")
|
|
782
|
+
b = rand_tensor((2, 3, 4), inds="abd", tags="t1")
|
|
783
|
+
tn1 = TensorNetwork((a, b))
|
|
784
|
+
tn2 = tn1.copy(deep=True)
|
|
785
|
+
# check can modify tensor structure
|
|
786
|
+
tn2["t1"].modify(inds=("a", "b", "X"))
|
|
787
|
+
assert tn1["t1"] is not tn2["t1"]
|
|
788
|
+
assert tn2["t1"].inds == ("a", "b", "X")
|
|
789
|
+
assert tn1["t1"].inds == ("a", "b", "d")
|
|
790
|
+
# and that data is not the same
|
|
791
|
+
assert tn1["t1"].data is not tn2["t1"].data
|
|
792
|
+
tn2["t1"].data[:] /= 2
|
|
793
|
+
assert_allclose(tn1["t1"].data / 2, tn2["t1"].data)
|
|
794
|
+
|
|
795
|
+
def test_TensorNetwork_init_checks(self):
|
|
796
|
+
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags={"red"})
|
|
797
|
+
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags={"blue"})
|
|
798
|
+
c = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags={"blue", "c"})
|
|
799
|
+
|
|
800
|
+
with pytest.raises(TypeError):
|
|
801
|
+
TensorNetwork(a, b) # missing brackets around ``a, b``.
|
|
802
|
+
|
|
803
|
+
tn = a & b
|
|
804
|
+
with pytest.raises(TypeError):
|
|
805
|
+
tn["red"] = 1
|
|
806
|
+
|
|
807
|
+
tn.add_tag("foo")
|
|
808
|
+
assert len(tn["foo"]) == 2
|
|
809
|
+
with pytest.raises(KeyError):
|
|
810
|
+
tn["foo"] = c
|
|
811
|
+
|
|
812
|
+
tn[("foo", "blue")] = c
|
|
813
|
+
assert "c" in tn.tags
|
|
814
|
+
assert tn[("blue", "c")] is c
|
|
815
|
+
|
|
816
|
+
assert "red" in tn.tags
|
|
817
|
+
del tn["red"]
|
|
818
|
+
assert "red" not in tn.tags
|
|
819
|
+
|
|
820
|
+
assert set(tn.tag_map.keys()) == {"blue", "c"}
|
|
821
|
+
|
|
822
|
+
tn.drop_tags("c")
|
|
823
|
+
assert set(tn.tag_map.keys()) == {"blue"}
|
|
824
|
+
tn.drop_tags(["blue"])
|
|
825
|
+
assert set(tn.tag_map.keys()) == set()
|
|
826
|
+
|
|
827
|
+
def test_drop_tags(self):
|
|
828
|
+
mps = MPS_rand_state(5, 2)
|
|
829
|
+
mps.drop_tags([mps.site_tag(i) for i in (0, 2, 4)])
|
|
830
|
+
assert mps.tags == oset([mps.site_tag(1), mps.site_tag(3)])
|
|
831
|
+
mps.drop_tags()
|
|
832
|
+
assert mps.tags == oset()
|
|
833
|
+
assert not mps.tag_map
|
|
834
|
+
|
|
835
|
+
def test_conj(self):
|
|
836
|
+
a_data = np.random.randn(2, 3, 4) + 1.0j * np.random.randn(2, 3, 4)
|
|
837
|
+
b_data = np.random.randn(3, 4, 5) + 1.0j * np.random.randn(3, 4, 5)
|
|
838
|
+
c_data = np.random.randn(5, 2, 6) + 1.0j * np.random.randn(5, 2, 6)
|
|
839
|
+
|
|
840
|
+
a = Tensor(a_data, inds=[0, 1, 2], tags={"red", "0"})
|
|
841
|
+
b = Tensor(b_data, inds=[1, 2, 3], tags={"blue", "1"})
|
|
842
|
+
c = Tensor(c_data, inds=[3, 0, 4], tags={"blue", "2"})
|
|
843
|
+
|
|
844
|
+
tn = a & b & c
|
|
845
|
+
new_tn = tn.conj()
|
|
846
|
+
|
|
847
|
+
for i, arr in enumerate((a_data, b_data, c_data)):
|
|
848
|
+
assert_allclose(new_tn[str(i)].data, arr.conj())
|
|
849
|
+
|
|
850
|
+
# make sure original network unchanged
|
|
851
|
+
for i, arr in enumerate((a_data, b_data, c_data)):
|
|
852
|
+
assert_allclose(tn[str(i)].data, arr)
|
|
853
|
+
|
|
854
|
+
def test_conj_inplace(self):
|
|
855
|
+
a_data = np.random.randn(2, 3, 4) + 1.0j * np.random.randn(2, 3, 4)
|
|
856
|
+
b_data = np.random.randn(3, 4, 5) + 1.0j * np.random.randn(3, 4, 5)
|
|
857
|
+
c_data = np.random.randn(5, 2, 6) + 1.0j * np.random.randn(5, 2, 6)
|
|
858
|
+
|
|
859
|
+
a = Tensor(a_data, inds=[0, 1, 2], tags={"red", "I0"})
|
|
860
|
+
b = Tensor(b_data, inds=[1, 2, 3], tags={"blue", "I1"})
|
|
861
|
+
c = Tensor(c_data, inds=[3, 0, 4], tags={"blue", "I2"})
|
|
862
|
+
|
|
863
|
+
tn = a & b & c
|
|
864
|
+
tn.conj_()
|
|
865
|
+
|
|
866
|
+
for i, arr in enumerate((a_data, b_data, c_data)):
|
|
867
|
+
assert_allclose(tn[f"I{i}"].data, arr.conj())
|
|
868
|
+
|
|
869
|
+
def test_multiply(self):
|
|
870
|
+
a = rand_tensor((2, 3, 4), inds=["0", "1", "2"], tags="red")
|
|
871
|
+
b = rand_tensor((3, 4, 5), inds=["1", "2", "3"], tags="blue")
|
|
872
|
+
c = rand_tensor((5, 2, 6), inds=["3", "0", "4"], tags="blue")
|
|
873
|
+
tn = a & b & c
|
|
874
|
+
x1 = (tn & tn.H) ^ ...
|
|
875
|
+
x2 = ((2 * tn) & tn.H) ^ ...
|
|
876
|
+
assert_allclose(2 * x1, x2)
|
|
877
|
+
|
|
878
|
+
def test_multiply_inplace(self):
|
|
879
|
+
a = rand_tensor((2, 3, 4), inds=["0", "1", "2"], tags="red")
|
|
880
|
+
b = rand_tensor((3, 4, 5), inds=["1", "2", "3"], tags="blue")
|
|
881
|
+
c = rand_tensor((5, 2, 6), inds=["3", "0", "4"], tags="blue")
|
|
882
|
+
tn = a & b & c
|
|
883
|
+
x1 = (tn & tn.H) ^ ...
|
|
884
|
+
tn *= 2
|
|
885
|
+
x2 = (tn & tn.H) ^ ...
|
|
886
|
+
assert_allclose(4 * x1, x2)
|
|
887
|
+
|
|
888
|
+
def test_multiply_each(self):
|
|
889
|
+
a = rand_tensor((2, 3, 4), inds=["0", "1", "2"], tags="red")
|
|
890
|
+
b = rand_tensor((3, 4, 5), inds=["1", "2", "3"], tags="blue")
|
|
891
|
+
c = rand_tensor((5, 2, 6), inds=["3", "0", "4"], tags="blue")
|
|
892
|
+
tn = a & b & c
|
|
893
|
+
x1 = (tn & tn.H) ^ ...
|
|
894
|
+
x2 = (tn.multiply_each(2) & tn.H) ^ ...
|
|
895
|
+
assert_allclose(2**3 * x1, x2)
|
|
896
|
+
|
|
897
|
+
def test_divide(self):
|
|
898
|
+
a = rand_tensor((2, 3, 4), inds=["0", "1", "2"], tags="red")
|
|
899
|
+
b = rand_tensor((3, 4, 5), inds=["1", "2", "3"], tags="blue")
|
|
900
|
+
c = rand_tensor((5, 2, 6), inds=["3", "0", "4"], tags="blue")
|
|
901
|
+
tn = a & b & c
|
|
902
|
+
x1 = (tn & tn.H) ^ ...
|
|
903
|
+
x2 = ((tn / 2) & tn.H) ^ ...
|
|
904
|
+
assert_allclose(x1 / 2, x2)
|
|
905
|
+
|
|
906
|
+
def test_divide_inplace(self):
|
|
907
|
+
a = rand_tensor((2, 3, 4), inds=["0", "1", "2"], tags="red")
|
|
908
|
+
b = rand_tensor((3, 4, 5), inds=["1", "2", "3"], tags="blue")
|
|
909
|
+
c = rand_tensor((5, 2, 6), inds=["3", "0", "4"], tags="blue")
|
|
910
|
+
tn = a & b & c
|
|
911
|
+
x1 = (tn & tn.H) ^ ...
|
|
912
|
+
tn /= 2
|
|
913
|
+
x2 = (tn & tn.H) ^ ...
|
|
914
|
+
assert_allclose(x1 / 4, x2)
|
|
915
|
+
|
|
916
|
+
def test_multiply_spread(self):
|
|
917
|
+
a = rand_tensor([2, 2], inds=["a", "b"], tags="A")
|
|
918
|
+
b = Tensor(a.data, ["b", "c"], tags="B")
|
|
919
|
+
c = Tensor(a.data, ["c", "d"], tags="C")
|
|
920
|
+
tn = a | b | c
|
|
921
|
+
tn.multiply_(-8j + 1 / 3, spread_over=3)
|
|
922
|
+
assert_allclose(tn["A"].data, tn["B"].data)
|
|
923
|
+
assert_allclose(tn["B"].data, tn["C"].data)
|
|
924
|
+
|
|
925
|
+
def test_multiply_spread_neg_stays_real(self):
|
|
926
|
+
a = rand_tensor([2, 2], inds=["a", "b"], tags="A", dtype="float32")
|
|
927
|
+
b = Tensor(a.data, ["b", "c"], tags="B")
|
|
928
|
+
c = Tensor(a.data, ["c", "d"], tags="C")
|
|
929
|
+
tn = a | b | c
|
|
930
|
+
tn.multiply_(-1000)
|
|
931
|
+
assert a.dtype == b.dtype == c.dtype == "float32"
|
|
932
|
+
assert_allclose(abs(tn["A"].data), abs(tn["B"].data))
|
|
933
|
+
assert_allclose(abs(tn["B"].data), abs(tn["C"].data))
|
|
934
|
+
|
|
935
|
+
def test_tensor_network_sum(self):
|
|
936
|
+
A = qtn.TN_rand_reg(n=6, reg=3, D=2, phys_dim=2, dtype="complex")
|
|
937
|
+
B = A.copy()
|
|
938
|
+
B.randomize_()
|
|
939
|
+
d1 = A.distance(B)
|
|
940
|
+
AmB = qtn.tensor_network_sum(A, -1 * B)
|
|
941
|
+
d2 = (AmB | AmB.H).contract(all) ** 0.5
|
|
942
|
+
assert d1 == pytest.approx(d2)
|
|
943
|
+
|
|
944
|
+
def test_contracting_tensors(self):
|
|
945
|
+
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags="red")
|
|
946
|
+
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags="blue")
|
|
947
|
+
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags="blue")
|
|
948
|
+
|
|
949
|
+
a_b_c = a & b & c
|
|
950
|
+
print(a_b_c)
|
|
951
|
+
repr(a_b_c)
|
|
952
|
+
|
|
953
|
+
assert isinstance(a_b_c, TensorNetwork)
|
|
954
|
+
a_bc = a_b_c ^ "blue"
|
|
955
|
+
assert isinstance(a_bc, TensorNetwork)
|
|
956
|
+
assert len(a_bc.tensors) == 2
|
|
957
|
+
abc = a_bc ^ ["red", "blue"]
|
|
958
|
+
assert isinstance(abc, Tensor)
|
|
959
|
+
assert_allclose(abc.data, a_b_c.contract().data)
|
|
960
|
+
|
|
961
|
+
assert len(a_b_c.tensors) == 3
|
|
962
|
+
a_b_c ^= "blue"
|
|
963
|
+
assert len(a_b_c.tensors) == 2
|
|
964
|
+
|
|
965
|
+
def test_cumulative_contract(self):
|
|
966
|
+
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags="red")
|
|
967
|
+
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags="blue")
|
|
968
|
+
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags="green")
|
|
969
|
+
|
|
970
|
+
d = a & b & c
|
|
971
|
+
d2 = d.copy()
|
|
972
|
+
|
|
973
|
+
cd = d >> ["red", "green", "blue"]
|
|
974
|
+
assert cd.shape == (6,)
|
|
975
|
+
assert cd.inds == (4,)
|
|
976
|
+
|
|
977
|
+
# make sure inplace operations didn't effect original tensor
|
|
978
|
+
for tag, names in d2.tag_map.items():
|
|
979
|
+
assert d.tag_map[tag] == names
|
|
980
|
+
|
|
981
|
+
assert isinstance(d >> ["red", "green", "blue"], Tensor)
|
|
982
|
+
|
|
983
|
+
# test inplace
|
|
984
|
+
d >>= ["red", "green", "blue"]
|
|
985
|
+
assert isinstance(d, TensorNetwork)
|
|
986
|
+
|
|
987
|
+
def test_contract_with_slices(self):
|
|
988
|
+
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags="I0")
|
|
989
|
+
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags="I1")
|
|
990
|
+
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags="I2")
|
|
991
|
+
d = rand_tensor((5, 2, 6), inds=[5, 6, 4], tags="I3")
|
|
992
|
+
tn = TensorNetwork((a, b, c, d))
|
|
993
|
+
tn.view_as_(TensorNetwork1D, L=4, site_tag_id="I{}")
|
|
994
|
+
|
|
995
|
+
assert len((tn ^ slice(2)).tensors) == 3
|
|
996
|
+
assert len((tn ^ slice(..., 1, -1)).tensors) == 3
|
|
997
|
+
assert len((tn ^ slice(-1, 1)).tensors) == 3
|
|
998
|
+
assert len((tn ^ slice(None, -2, -1)).tensors) == 3
|
|
999
|
+
assert len((tn ^ slice(-2, 0)).tensors) == 3
|
|
1000
|
+
|
|
1001
|
+
@pytest.mark.parametrize("structured", [True, False])
|
|
1002
|
+
@pytest.mark.parametrize("inplace", [True, False])
|
|
1003
|
+
def test_contract_output_unwrapping(self, structured, inplace):
|
|
1004
|
+
psi = qtn.MPS_rand_state(5, 3)
|
|
1005
|
+
tn = psi.H & psi
|
|
1006
|
+
if structured:
|
|
1007
|
+
tags = ...
|
|
1008
|
+
else:
|
|
1009
|
+
tags = all
|
|
1010
|
+
|
|
1011
|
+
x = tn.contract(tags, inplace=inplace)
|
|
1012
|
+
|
|
1013
|
+
assert isinstance(x, TensorNetwork) == inplace
|
|
1014
|
+
|
|
1015
|
+
def test_contraction_info(self):
|
|
1016
|
+
a = qtn.rand_tensor((8, 8), ("a", "b"))
|
|
1017
|
+
b = qtn.rand_tensor((8, 8), ("b", "c"))
|
|
1018
|
+
c = qtn.rand_tensor((8, 8), ("c", "d"))
|
|
1019
|
+
tn = a | b | c
|
|
1020
|
+
assert tn.contraction_width() == 6
|
|
1021
|
+
assert tn.contraction_cost() == 2 * 8**3
|
|
1022
|
+
|
|
1023
|
+
def test_contract_to_dense_reduced_factor(self):
|
|
1024
|
+
tn = qtn.PEPS.rand(2, 2, 2)
|
|
1025
|
+
left_inds = ["k0,0", "k0,1"]
|
|
1026
|
+
right_inds = ["k1,0", "k1,1"]
|
|
1027
|
+
A = tn.to_dense(left_inds, right_inds)
|
|
1028
|
+
L = tn.compute_reduced_factor("left", left_inds, right_inds)
|
|
1029
|
+
Linv = ar.do("linalg.inv", L)
|
|
1030
|
+
Ul = Linv @ A
|
|
1031
|
+
assert_allclose(Ul.T @ Ul, np.eye(4), atol=1e-10)
|
|
1032
|
+
R = tn.compute_reduced_factor("right", left_inds, right_inds)
|
|
1033
|
+
Rinv = ar.do("linalg.inv", R)
|
|
1034
|
+
Ur = A @ Rinv
|
|
1035
|
+
assert_allclose(Ur @ Ur.T, np.eye(4), atol=1e-10)
|
|
1036
|
+
|
|
1037
|
+
@pytest.mark.parametrize("method", ("auto", "dense", "overlap"))
|
|
1038
|
+
@pytest.mark.parametrize("normalized", (True, False))
|
|
1039
|
+
def test_tensor_network_distance(self, method, normalized):
|
|
1040
|
+
n = 6
|
|
1041
|
+
A = qtn.TN_rand_reg(n=n, reg=3, D=2, phys_dim=2, dtype=complex)
|
|
1042
|
+
Ad = A.to_dense([f"k{i}" for i in range(n)])
|
|
1043
|
+
B = qtn.TN_rand_reg(n=6, reg=3, D=2, phys_dim=2, dtype=complex)
|
|
1044
|
+
Bd = B.to_dense([f"k{i}" for i in range(n)])
|
|
1045
|
+
d1 = np.linalg.norm(Ad - Bd)
|
|
1046
|
+
d2 = A.distance(B, method=method, normalized=normalized)
|
|
1047
|
+
if normalized:
|
|
1048
|
+
assert 0 <= d2 <= 2
|
|
1049
|
+
else:
|
|
1050
|
+
assert d1 == pytest.approx(d2)
|
|
1051
|
+
|
|
1052
|
+
@pytest.mark.parametrize(
|
|
1053
|
+
"method,opts",
|
|
1054
|
+
(
|
|
1055
|
+
("als", (("enforce_pos", False), ("solver", "lstsq"))),
|
|
1056
|
+
("als", (("enforce_pos", True),)),
|
|
1057
|
+
pytest.param(
|
|
1058
|
+
"autodiff",
|
|
1059
|
+
(("distance_method", "dense"),),
|
|
1060
|
+
marks=requires_autograd,
|
|
1061
|
+
),
|
|
1062
|
+
pytest.param(
|
|
1063
|
+
"autodiff",
|
|
1064
|
+
(("distance_method", "overlap"),),
|
|
1065
|
+
marks=requires_autograd,
|
|
1066
|
+
),
|
|
1067
|
+
),
|
|
1068
|
+
)
|
|
1069
|
+
def test_fit_mps(self, method, opts):
|
|
1070
|
+
k1 = qtn.MPS_rand_state(5, 3, seed=666)
|
|
1071
|
+
k2 = qtn.MPS_rand_state(5, 3, seed=667)
|
|
1072
|
+
assert k1.distance_normalized(k2) > 1e-3
|
|
1073
|
+
k1.fit_(k2, method=method, progbar=True, **dict(opts))
|
|
1074
|
+
assert k1.distance_normalized(k2) < 1e-3
|
|
1075
|
+
|
|
1076
|
+
@pytest.mark.parametrize(
|
|
1077
|
+
"method,opts",
|
|
1078
|
+
(
|
|
1079
|
+
("als", (("enforce_pos", False),)),
|
|
1080
|
+
("als", (("enforce_pos", True),)),
|
|
1081
|
+
pytest.param(
|
|
1082
|
+
"autodiff",
|
|
1083
|
+
(("distance_method", "dense"),),
|
|
1084
|
+
marks=requires_autograd,
|
|
1085
|
+
),
|
|
1086
|
+
pytest.param(
|
|
1087
|
+
"autodiff",
|
|
1088
|
+
(("distance_method", "overlap"),),
|
|
1089
|
+
marks=requires_autograd,
|
|
1090
|
+
),
|
|
1091
|
+
),
|
|
1092
|
+
)
|
|
1093
|
+
def test_fit_rand_reg(self, method, opts):
|
|
1094
|
+
r1 = qtn.TN_rand_reg(5, 4, D=2, seed=666, phys_dim=2)
|
|
1095
|
+
k2 = qtn.MPS_rand_state(5, 3, seed=667)
|
|
1096
|
+
assert r1.distance(k2) > 1e-3
|
|
1097
|
+
r1.fit_(k2, method=method, progbar=True, **dict(opts))
|
|
1098
|
+
assert r1.distance(k2) < 1e-3
|
|
1099
|
+
|
|
1100
|
+
@pytest.mark.parametrize(
|
|
1101
|
+
"method,opts",
|
|
1102
|
+
(
|
|
1103
|
+
("als", (("enforce_pos", False),)),
|
|
1104
|
+
("als", (("enforce_pos", True),)),
|
|
1105
|
+
pytest.param(
|
|
1106
|
+
"autodiff",
|
|
1107
|
+
(("distance_method", "dense"),),
|
|
1108
|
+
marks=requires_autograd,
|
|
1109
|
+
),
|
|
1110
|
+
pytest.param(
|
|
1111
|
+
"autodiff",
|
|
1112
|
+
(("distance_method", "overlap"),),
|
|
1113
|
+
marks=requires_autograd,
|
|
1114
|
+
),
|
|
1115
|
+
),
|
|
1116
|
+
)
|
|
1117
|
+
def test_fit_partial_tags(self, method, opts):
|
|
1118
|
+
k1 = qtn.MPS_rand_state(5, 3, seed=666)
|
|
1119
|
+
k2 = qtn.MPS_rand_state(5, 3, seed=667)
|
|
1120
|
+
d0 = k1.distance(k2)
|
|
1121
|
+
tags = ["I0", "I2", "I4"]
|
|
1122
|
+
k1f = k1.fit(
|
|
1123
|
+
k2, tol=1e-3, tags=tags, method=method, progbar=True, **dict(opts)
|
|
1124
|
+
)
|
|
1125
|
+
assert k1f.distance(k2) < d0
|
|
1126
|
+
assert (k1f[0] - k1[0]).norm() > 1e-12
|
|
1127
|
+
assert (k1f[1] - k1[1]).norm() < 1e-12
|
|
1128
|
+
assert (k1f[2] - k1[2]).norm() > 1e-12
|
|
1129
|
+
assert (k1f[3] - k1[3]).norm() < 1e-12
|
|
1130
|
+
assert (k1f[4] - k1[4]).norm() > 1e-12
|
|
1131
|
+
|
|
1132
|
+
    def test_reindex(self):
        a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags="red")
        b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3], tags="blue")
        c = Tensor(np.random.randn(5, 2, 6), inds=[3, 0, 4], tags="green")

        a_b_c = a & b & c
        a_b_c.check()

        d = a_b_c.reindex({4: "foo", 2: "bar"})
        d.check()

        assert a_b_c.outer_inds() == (4,)
        assert d.outer_inds() == ("foo",)
        assert set(a_b_c.inner_inds()) == {0, 1, 2, 3}
        assert set(d.inner_inds()) == {0, 1, "bar", 3}
        assert d.tensors[0].inds == (0, 1, "bar")

        d = a_b_c.reindex_({4: "foo", 2: "bar"})

        assert a_b_c.outer_inds() == ("foo",)
        assert set(d.inner_inds()) == {0, 1, "bar", 3}
        assert d.tensors[0].inds == (0, 1, "bar")

    def test_add_tag(self):
        a = rand_tensor((2, 3, 4), inds="abc", tags={"red"})
        b = rand_tensor((2, 3, 4), inds="abc", tags={"blue"})
        tn = a & b
        tn.add_tag("green")
        assert "green" in tn.tag_map
        assert "green" in tn["red"].tags
        assert "green" in tn["blue"].tags
        tn.add_tag("blue")
        for t in tn:
            assert "blue" in t.tags

    def test_index_by_site(self):
        a_data = np.random.randn(2, 3, 4)
        b_data = np.random.randn(2, 3, 4)
        a = Tensor(a_data, inds="abc", tags={"I0"})
        b = Tensor(b_data, inds="abc", tags={"I1"})
        tn = TensorNetwork((a, b))
        tn.view_as_(TensorNetwork1D, L=2, site_tag_id="I{}")
        assert_allclose(tn[0].data, a_data)
        new_data = np.random.randn(2, 3, 4)
        tn[1] = Tensor(new_data, inds="abc", tags={"I1", "red"})
        assert_allclose(tn["I1"].data, new_data)
        assert "red" in tn["I1"].tags

    def test_set_data_in_tensor(self):
        a_data = np.random.randn(2, 3, 4)
        b_data = np.random.randn(2, 3, 4)
        a = Tensor(a_data, inds="abc", tags={"I0"})
        b = Tensor(b_data, inds="abc", tags={"I1"})
        tn = TensorNetwork((a, b))
        tn.view_as_(TensorNetwork1D, L=2, site_tag_id="I{}")
        assert_allclose(tn[0].data, a_data)
        new_data = np.random.randn(2, 3, 4)
        tn[1].modify(data=new_data)
        assert_allclose(tn["I1"].data, new_data)

    def test_make_tids_consecutive_combining_with_no_check_collisions(self):
        p1 = MPS_rand_state(5, 3, phys_dim=3)
        p2 = MPS_rand_state(5, 3, phys_dim=3)
        p2.make_tids_consecutive(tid0=5)
        # shouldn't need to check any collisions
        tn = TensorNetwork((p1, p2), check_collisions=False)
        # test can contract
        assert 0 < abs(tn ^ ...) < 1

    def test_retagging(self):
        x = rand_tensor((2, 4), inds="ab", tags={"X", "I0"})
        y = rand_tensor((4, 2, 5), inds="bcd", tags={"Y", "I1"})
        z = rand_tensor((5, 3), inds="de", tags={"Z", "I2"})
        tn = TensorNetwork((x, y, z))
        tn.retag_({"I0": "I1", "I1": "I2", "I2": "I3", "Z": "A"})
        assert set(tn.tag_map.keys()) == {"X", "I1", "I2", "I3", "Y", "A"}

    def test_squeeze(self):
        A, B, C = (
            rand_tensor((1, 2, 3), "abc", tags=["I0"]),
            rand_tensor((2, 3, 4), "bcd", tags=["I1"]),
            rand_tensor((4, 1, 1), "dae", tags=["I2"]),
        )
        tn = A & B & C

        x1 = tn ^ ...
        stn = tn.squeeze()

        assert tn["I0"].shape == (1, 2, 3)
        assert tn["I1"].shape == (2, 3, 4)
        assert tn["I2"].shape == (4, 1, 1)

        assert stn["I0"].shape == (2, 3)
        assert stn["I1"].shape == (2, 3, 4)
        assert stn["I2"].shape == (4,)

        x2 = stn ^ ...
        assert_allclose(x1.data, x2)  # x2 should be scalar already

    def test_tensors_sorted(self):
        tn1, tn2 = TensorNetwork([]), TensorNetwork([])
        A, B, C = (
            rand_tensor((1, 2, 3), "abc", tags=["I0"]),
            rand_tensor((2, 3, 4), "bcd", tags=["I1"]),
            rand_tensor((4, 1, 1), "dae", tags=["I2"]),
        )

        tn1 &= A
        tn1 &= B
        tn1 &= C

        tn2 &= C
        tn2 &= A
        tn2 &= B

        for t1, t2 in zip(tn1.tensors_sorted(), tn2.tensors_sorted()):
            assert t1.tags == t2.tags
            assert t1.almost_equals(t2)

    def test_select_tensors_mode(self):
        A, B, C = (
            rand_tensor((2, 2), "ab", tags={"0", "X"}),
            rand_tensor((2, 2), "bc", tags={"1", "X", "Y"}),
            rand_tensor((2, 3), "cd", tags={"2", "Y"}),
        )
        tn = A & B & C

        ts = tn.select_tensors(("X", "Y"), which="all")
        assert len(ts) == 1
        assert not any(map(A.almost_equals, ts))
        assert any(map(B.almost_equals, ts))
        assert not any(map(C.almost_equals, ts))

        ts = tn.select_tensors(("X", "Y"), which="any")
        assert len(ts) == 3
        assert any(map(A.almost_equals, ts))
        assert any(map(B.almost_equals, ts))
        assert any(map(C.almost_equals, ts))

    def test_replace_with_identity(self):
        A, B, C, D = (
            rand_tensor((2, 3, 4), "abc", tags=["I0"]),
            rand_tensor((4, 5, 6), "cde", tags=["I1"]),
            rand_tensor((5, 6, 7), "def", tags=["I2"]),
            rand_tensor((7,), "f", tags=["I3"]),
        )

        tn = A & B & C & D
        tn.check()

        with pytest.raises(ValueError):
            tn.replace_with_identity(("I1", "I2"), inplace=True)

        tn["I2"] = rand_tensor((5, 6, 4), "def", tags=["I2"])
        tn["I3"] = rand_tensor((4,), "f", tags=["I3"])

        tn1 = tn.replace_with_identity(("I1", "I2"))
        tn1.check()
        assert len(tn1.tensors) == 2
        x = tn1 ^ ...
        assert set(x.inds) == {"a", "b"}

        A, B, C = (
            rand_tensor((2, 2), "ab", tags={"0"}),
            rand_tensor((2, 2), "bc", tags={"1"}),
            rand_tensor((2, 3), "cd", tags={"2"}),
        )

        tn = A & B & C

        tn2 = tn.replace_with_identity("1")
        assert len(tn2.tensors) == 2
        x = tn2 ^ ...
        assert set(x.inds) == {"a", "d"}

    def test_partition(self):
        k = MPS_rand_state(10, 7, site_tag_id="Q{}")
        where = [f"Q{i}" for i in range(10) if i % 2 == 1]
        k.add_tag("odd", where=where, which="any")

        tn_even, tn_odd = k.partition("odd")

        assert len(tn_even.tensors) == len(tn_odd.tensors) == 5

        assert tn_even.site_tag_id == "Q{}"
        assert tn_odd.site_tag_id == "Q{}"

        assert (tn_even & tn_odd).sites == tuple(range(10))

    def test_subgraphs(_):
        k1 = MPS_rand_state(6, 7, site_ind_id="a{}")
        k2 = MPS_rand_state(8, 7, site_ind_id="b{}")
        tn = k1 | k2
        s1, s2 = tn.subgraphs()
        assert {s1.num_tensors, s2.num_tensors} == {6, 8}

    def test_expand_bond_dimension_zeros(self):
        k = MPS_rand_state(10, 7)
        k0 = k.copy()
        k0.expand_bond_dimension_(13)
        assert k0.max_bond() == 13
        assert k0.ind_size("k0") == 2
        assert k0.distance(k) == pytest.approx(0.0, abs=1e-7)

    def test_expand_bond_dimension_random(self):
        tn = qtn.TN_rand_reg(6, 3, 2, dist="uniform")
        Z = tn ^ ...
        # expand w/ positive random entries -> contraction value must increase
        tn.expand_bond_dimension_(3, rand_strength=0.1, rand_dist="uniform")
        Ze = tn ^ ...
        assert Ze > Z

    def test_compress_multibond(self):
        A = rand_tensor((7, 2, 2), "abc", tags="A")
        A.expand_ind("c", 3)
        B = rand_tensor((3, 2, 7), "cbd", tags="B")
        x0 = (A & B).trace("a", "d")
        qtn.tensor_compress_bond(A, B, absorb="left")
        A.transpose_("a", "b")
        assert A.shape == (7, 4)
        B.transpose_("b", "d")
        assert B.shape == (4, 7)
        assert B.H @ B == pytest.approx(4)
        x1 = (A & B).trace("a", "d")
        assert x1 == pytest.approx(x0)

    @pytest.mark.parametrize("absorb", ["both", "left", "right"])
    def test_tensor_compress_bond_reduced_modes(self, absorb):
        kws = dict(max_bond=4, absorb=absorb)

        A = rand_tensor((3, 4, 5), "abc", tags="A")
        B = rand_tensor((5, 6), "cd", tags="B")
        AB = A @ B

        # naive contract and compress should be optimal
        A1, B1 = A.copy(), B.copy()
        qtn.tensor_compress_bond(A1, B1, reduced=False, **kws)
        assert A1.shape == (3, 4, 4)
        assert B1.shape == (4, 6)
        if absorb == "right":
            # assert A is isometric
            assert A1.norm() ** 2 == pytest.approx(4)
        if absorb == "left":
            # assert B is isometric
            assert B1.norm() ** 2 == pytest.approx(4)
        # compute the optimal fidelity
        d1 = (A1 @ B1).distance(AB)
        assert 0 < d1

        A2, B2 = A.copy(), B.copy()
        qtn.tensor_compress_bond(A2, B2, reduced=True, **kws)
        assert A2.shape == (3, 4, 4)
        assert B2.shape == (4, 6)
        if absorb == "right":
            assert A2.norm() ** 2 == pytest.approx(4)
        if absorb == "left":
            assert B2.norm() ** 2 == pytest.approx(4)
        d2 = (A2 @ B2).distance(AB)
        # reduced mode should also be optimal
        assert d2 == pytest.approx(d1)

        Ar, Br = A.copy(), B.copy()
        qtn.tensor_compress_bond(Ar, Br, reduced="right", **kws)
        assert Ar.shape == (3, 4, 4)
        assert Br.shape == (4, 6)
        if absorb == "right":
            # A won't be canonical
            assert Ar.left_inds is None
            assert Ar.norm() ** 2 != pytest.approx(4)
        if absorb == "left":
            assert Br.norm() ** 2 == pytest.approx(4)
        dr = (Ar @ Br).distance(AB)
        # right reduced mode should not be optimal
        assert dr > d1
        # unless we canonicalize first
        Ar, Br = A.copy(), B.copy()
        qtn.tensor_canonize_bond(Ar, Br, absorb="right")
        qtn.tensor_compress_bond(Ar, Br, reduced="right", **kws)
        if absorb == "right":
            assert Ar.norm() ** 2 == pytest.approx(4)
        if absorb == "left":
            assert Br.norm() ** 2 == pytest.approx(4)
        dr = (Ar @ Br).distance(AB)
        assert dr == pytest.approx(d1)

        Al, Bl = A.copy(), B.copy()
        qtn.tensor_compress_bond(Al, Bl, reduced="left", **kws)
        assert Al.shape == (3, 4, 4)
        assert Bl.shape == (4, 6)
        if absorb == "right":
            assert Al.norm() ** 2 == pytest.approx(4)
        if absorb == "left":
            # B won't be canonical
            assert Bl.left_inds is None
            assert Bl.norm() ** 2 != pytest.approx(4)
        dl = (Al @ Bl).distance(AB)
        # left reduced mode should not be optimal
        assert dl > d1
        # unless we canonicalize first
        Al, Bl = A.copy(), B.copy()
        qtn.tensor_canonize_bond(Al, Bl, absorb="left")
        qtn.tensor_compress_bond(Al, Bl, reduced="left", **kws)
        if absorb == "right":
            assert Al.norm() ** 2 == pytest.approx(4)
        if absorb == "left":
            assert Bl.norm() ** 2 == pytest.approx(4)
        dl = (Al @ Bl).distance(AB)
        assert dl == pytest.approx(d1)

    def test_canonize_multibond(self):
        A = rand_tensor((3, 4, 5), "abc", tags="A")
        assert A.H @ A != pytest.approx(3)
        B = rand_tensor((5, 4, 3), "cbd", tags="B")
        x0 = (A & B).trace("a", "d")
        qtn.tensor_canonize_bond(A, B)
        assert A.shape == (3, 3)
        assert B.shape == (3, 3)
        assert A.H @ A == pytest.approx(3)
        x1 = (A & B).trace("a", "d")
        assert x1 == pytest.approx(x0)

    @pytest.mark.parametrize("method", ["svd", "eig", "isvd", "svds", "rsvd"])
    def test_compress_between(self, method):
        A = rand_tensor((3, 4, 5), "abd", tags={"T1"})
        A.expand_ind("d", 10)
        B = rand_tensor((5, 6), "dc", tags={"T2"})
        B.expand_ind("d", 10)
        tn = A | B
        assert A.shared_bond_size(B) == 10
        tn.compress_between("T1", "T2", method=method, mode="basic")
        assert A.shared_bond_size(B) == 5

    @pytest.mark.parametrize("method", ["svd", "eig", "isvd", "svds", "rsvd"])
    def test_compress_all(self, method):
        k = MPS_rand_state(10, 7)
        k += k
        k /= 2
        k.compress_all_(max_bond=7, method=method, mode="basic")
        assert k.max_bond() == 7
        assert_allclose(k.H @ k, 1.0)

    def test_compress_all_1d(self):
        mpo = qtn.MPO_rand(10, 7)
        mpo1 = mpo.copy()
        mpo1.compress(max_bond=4, renorm=False)
        mpo2 = mpo.compress_all_1d(max_bond=4, renorm=False)
        assert mpo1.max_bond() == mpo2.max_bond() == 4
        assert mpo2 is not mpo
        assert_allclose(mpo1.H @ mpo, mpo2.H @ mpo)

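The compression tests above all reduce to truncating a shared bond. A minimal standalone sketch of `tensor_compress_bond`, assuming quimb is importable as `quimb.tensor`; the shapes and `max_bond` value are arbitrary:

import quimb.tensor as qtn

A = qtn.rand_tensor((3, 4, 5), inds="abc", tags="A")
B = qtn.rand_tensor((5, 6), inds="cd", tags="B")
assert A.shared_bond_size(B) == 5

# truncate the shared index "c" from size 5 down to 4, in place
qtn.tensor_compress_bond(A, B, max_bond=4)
assert A.shared_bond_size(B) == 4
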
    def test_canonize_between(self):
        k = MPS_rand_state(4, 3)
        k.canonize_between("I1", "I2")
        assert k.H @ k == pytest.approx(1)
        t = k[1]
        assert t.H @ t == pytest.approx(3)
        t = k[2]
        assert t.H @ t != pytest.approx(3)
        k.canonize_between("I2", "I1")
        assert k.H @ k == pytest.approx(1)
        t = k[1]
        assert t.H @ t != pytest.approx(3)
        t = k[2]
        assert t.H @ t == pytest.approx(3)

    def test_canonize_around(self):
        # make a small tree tensor network
        #
        #             U2--                          v--
        #             |                             |
        #             U1--           ==>            v--    etc
        #             |                             |
        #   L2---L1---C---R1---R2       >---->---->----O---<
        #   /    |    |   |    \        /    |    |    |   \
        #
        C = qtn.rand_tensor([2], inds=["kC"], tags="C", dtype=complex)

        # left arm
        L1 = qtn.rand_tensor([2], inds=["kL1"], tags="L1", dtype=complex)
        qtn.new_bond(C, L1, size=7)
        L2 = qtn.rand_tensor([2], inds=["kL2"], tags="L2", dtype=complex)
        qtn.new_bond(L1, L2, size=7)

        # right arm
        R1 = qtn.rand_tensor([2], inds=["kR1"], tags="R1", dtype=complex)
        qtn.new_bond(C, R1, size=7)
        R2 = qtn.rand_tensor([2], inds=["kR2"], tags="R2", dtype=complex)
        qtn.new_bond(R1, R2, size=7)

        # upper arm
        U1 = qtn.rand_tensor([2], inds=["kU1"], tags="U1", dtype=complex)
        qtn.new_bond(C, U1, size=7)
        U2 = qtn.rand_tensor([2], inds=["kU2"], tags="U2", dtype=complex)
        qtn.new_bond(U1, U2, size=7)

        # make the TN and randomize the data then normalize
        ttn = qtn.TensorNetwork([C, L1, L2, R1, R2, U1, U2])
        ttn.randomize_()
        ttn /= (ttn.H @ ttn) ** 0.5
        assert ttn.H @ ttn == pytest.approx(1.0)

        # test max distance
        ttn.canonize_around_("C", max_distance=1)
        assert ttn.H @ ttn == pytest.approx(1.0)
        assert ttn["C"].H @ ttn["C"] != pytest.approx(1.0)

        # tensors one-away from center should be isometries
        for tg in ["L1", "R1", "U1"]:
            assert ttn[tg].H @ ttn[tg] == pytest.approx(7)
        # tensors two-away from center should be random
        for tg in ["L2", "R2", "U2"]:
            assert ttn[tg].H @ ttn[tg] != pytest.approx(2)

        ttn.canonize_around_("C", max_distance=2)
        for tg in ["L2", "R2", "U2"]:
            assert ttn[tg].H @ ttn[tg] == pytest.approx(2)

        # test can set the orthogonality center anywhere
        for tg in ["C", "L1", "L2", "R1", "R2", "U1", "U2"]:
            ttn.canonize_around_(tg)
            assert ttn.H @ ttn == pytest.approx(1.0)
            assert ttn[tg].H @ ttn[tg] == pytest.approx(1.0)

            # tensors two-away from center should now be isometries
            for far_tg in ["L2", "R2", "U2"]:
                if far_tg != tg:
                    assert ttn[far_tg].H @ ttn[far_tg] == pytest.approx(2)

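The same `canonize_around_` call works for any tagged region, not just the tree above. A minimal sketch on an MPS, assuming quimb is importable as `quimb.tensor` and the default `I{}` site tags; the length and bond dimension are arbitrary:

import quimb.tensor as qtn

mps = qtn.MPS_rand_state(8, 4)  # normalized by default (tests above pass normalize=False to opt out)
mps.canonize_around_("I3")      # make site 3 the orthogonality center
assert abs(mps.H @ mps - 1.0) < 1e-9   # the state itself is unchanged
t = mps[3]
assert abs(t.H @ t - 1.0) < 1e-9       # all of the norm now sits at site 3
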
    def test_tn_split_tensor(self):
        mps = MPS_rand_state(4, 3)
        right_inds = bonds(mps[1], mps[2])
        mps.split_tensor(1, left_inds=None, right_inds=right_inds, rtags="X")
        mps.check()
        assert mps.num_tensors == 5
        assert mps["X"].shape == (3, 3)
        assert mps.H @ mps == pytest.approx(1.0)

    def test_insert_operator(self):
        p = MPS_rand_state(3, 7, tags="KET")
        q = p.H.retag({"KET": "BRA"})
        qp = q & p
        sz = qu.spin_operator("z").real
        qp.insert_operator(
            sz, ("KET", "I1"), ("BRA", "I1"), tags="SZ", inplace=True
        )
        assert "SZ" in qp.tags
        assert len(qp.tensors) == 7
        x1 = qp ^ all
        x2 = qu.expec(p.to_dense(), qu.ikron(sz, [2, 2, 2], inds=1))
        assert x1 == pytest.approx(x2)

    @pytest.mark.parametrize("dtype", (float, complex))
    def test_insert_gauge(self, dtype):
        k = MPS_rand_state(10, 7, dtype=dtype, normalize=False)
        kU = k.copy()

        U = rand_tensor((7, 7), dtype=dtype, inds="ab").data
        kU.insert_gauge(U, 4, 5)

        assert k[3].almost_equals(kU[3])
        assert not k[4].almost_equals(kU[4])
        assert not k[5].almost_equals(kU[5])
        assert k[6].almost_equals(kU[6])

        assert k[4].inds == kU[4].inds
        assert k[5].inds == kU[5].inds

        assert_allclose(k.H @ k, kU.H @ kU)

    def test_fuse_multibonds(self):
        x = rand_tensor((2, 2, 2), ["a", "b", "c"])
        y = rand_tensor((2, 2, 2, 2), ["b", "c", "d", "e"])
        z = rand_tensor((2, 2, 2), ["a", "e", "d"])
        tn = x & y & z
        assert len(tn.inner_inds()) == 5
        tn.fuse_multibonds(inplace=True)
        assert len(tn.inner_inds()) == 3

    def test_cut_bond(self):
        ta = qtn.rand_tensor((2, 2, 2), inds="abc", tags="A")
        tb = qtn.rand_tensor((2, 2, 2), inds="cde", tags="B")
        tn = ta | tb
        tn.cut_bond("c", new_left_ind="l", new_right_ind="r")
        assert ta.inds == ("a", "b", "l")
        assert tb.inds == ("r", "d", "e")

    def test_drape_bond_between(self):
        tx = qtn.rand_tensor([2, 3, 4], ["a", "b", "c"], tags="X")
        ty = qtn.rand_tensor([3, 4, 6], ["b", "d", "e"], tags="Y")
        tz = qtn.rand_tensor([5], ["f"], tags="Z")
        tn = tx | ty | tz
        assert tn.num_indices == 6
        assert len(tn.subgraphs()) == 2
        te = tn.contract()
        tn.drape_bond_between_("X", "Y", "Z")
        assert tn.num_indices == 7
        assert len(tn.subgraphs()) == 1
        t = tn.contract()
        assert t.distance_normalized(te) == pytest.approx(0.0, abs=1e-6)

    def test_draw(self):
        import matplotlib
        from matplotlib import pyplot as plt

        matplotlib.use("Template")
        k = MPS_rand_state(10, 7, normalize=False)
        fig = k.draw(color=["I0", "I2"], return_fig=True)
        plt.close(fig)

    def test_draw_with_fixed_pos(self):
        import matplotlib
        from matplotlib import pyplot as plt

        matplotlib.use("Template")
        n = 7
        p = MPS_rand_state(n, 7, tags="KET")
        q = MPS_rand_state(n, 7, tags="BRA")
        fix = {
            **{("KET", f"I{i}"): (i, 0) for i in range(n)},
            **{("BRA", f"I{i}"): (i, 1) for i in range(n)},
        }
        fig = (q | p).draw(color=["KET", "BRA"], fix=fix, return_fig=True)
        plt.close(fig)

    def test_pickle(self):
        import tempfile
        import os

        pytest.importorskip("joblib")

        tn = MPS_rand_state(10, 7, tags="KET")

        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "tn.dmp")
            qu.save_to_disk(tn, fname)
            tn2 = qu.load_from_disk(fname)

        assert tn.H @ tn2 == pytest.approx(1.0)

        assert all(hash(tn) not in t.owners for t in tn2)
        assert all(hash(tn2) in t.owners for t in tn2)

    @pytest.mark.parametrize("dtype", [None, "float32", "complex128"])
    def test_randomize(self, dtype):
        psi = MPS_rand_state(5, 3, dtype="float64")
        x1 = psi.H @ psi
        psi.randomize_(seed=42, dtype=dtype)
        x2 = psi.H @ psi
        assert x1 != pytest.approx(x2)
        if dtype is None:
            assert psi.dtype == "float64"
        else:
            assert psi.dtype == dtype
        psi.randomize_(seed=42, dtype=dtype)
        x3 = psi.H @ psi
        assert x2 == pytest.approx(x3)

    @pytest.mark.parametrize("dtype", ["float32", "complex128"])
    @pytest.mark.parametrize("value", [None, 42])
    def test_equalize_norms(self, dtype, value):
        psi = MPS_rand_state(5, 3, dtype=dtype)
        psi.randomize_(seed=42)
        x_exp = psi.H @ psi
        norms = [t.norm() for t in psi]
        psi.equalize_norms_(value)
        enorms = [t.norm() for t in psi]
        if value is None:
            assert all(n1 != n2 for n1, n2 in zip(norms, enorms))
            assert psi.H @ psi == pytest.approx(x_exp, rel=1e-4)
        else:
            assert all(n1 == pytest.approx(value) for n1 in enorms)
            assert (psi.H @ psi) * 10 ** (2 * psi.exponent) == pytest.approx(
                x_exp
            )

    @pytest.mark.parametrize("append", [None, "*"])
    def test_mangle_inner(self, append):
        a = MPS_rand_state(6, 3)
        b = a.copy()
        assert tuple(a.ind_map) == tuple(b.ind_map)
        b.mangle_inner_(append)
        assert tuple(a.ind_map) != tuple(b.ind_map)
        ab = a & b
        assert all(ix in ab.ind_map for ix in a.ind_map)
        assert all(ix in ab.ind_map for ix in b.ind_map)

    @pytest.mark.parametrize("mode", ["manual", "dense", "mps", "tree"])
    def test_hyperind_resolve(self, mode):
        import networkx as nx
        import random
        import collections

        # create a random-interaction Ising model
        G = nx.watts_strogatz_graph(10, 4, 0.5, seed=666)
        edges = tuple(G.edges)
        js = collections.defaultdict(random.random)
        htn = qtn.HTN_classical_partition_function_from_edges(
            edges, j=lambda i, j: js[frozenset((i, j))], beta=0.22, h=0.04
        )
        Zh = htn.contract(all, output_inds=())
        assert len(htn.get_hyperinds(output_inds=())) == 10
        if mode == "manual":
            # resolve manually
            tn = qtn.TN_classical_partition_function_from_edges(
                edges, j=lambda i, j: js[frozenset((i, j))], beta=0.22, h=0.04
            )
        else:
            tn = htn.hyperinds_resolve(mode)
        assert len(tn.get_hyperinds()) == 0
        Z = tn.contract(all, output_inds=())
        assert Z == pytest.approx(Zh)
        assert max(map(len, tn.ind_map.values())) == 2

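A minimal standalone sketch of the hyper-index workflow exercised above (build a classical partition function whose variables are hyper indices, then resolve them), assuming quimb is importable as `quimb.tensor`; the edge list, coupling and inverse temperature are arbitrary choices:

import quimb.tensor as qtn

edges = [(0, 1), (1, 2), (2, 0)]
htn = qtn.HTN_classical_partition_function_from_edges(
    edges, beta=0.3, j=lambda i, j: 1.0, h=0.0
)
Zh = htn.contract(all, output_inds=())

# expand each hyper index into a tree of COPY tensors
tn = htn.hyperinds_resolve("tree")
assert len(tn.get_hyperinds()) == 0
Z = tn.contract(all, output_inds=())
assert abs(Z - Zh) < 1e-8 * abs(Zh)
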
    @requires_cotengra
    def test_hyperind_simplification_with_outputs(self):
        htn = qtn.HTN_random_ksat(3, 10, alpha=3.0, seed=42)
        assert htn.get_hyperinds()
        htn.randomize_()
        output_inds = [f"var{v}" for v in range(1, 11)]
        pex = htn.contract(output_inds=output_inds).data
        stn1 = htn.compress_simplify(output_inds=output_inds)
        p1 = stn1.contract(output_inds=output_inds).data
        assert stn1.get_hyperinds()
        assert_allclose(pex, p1)
        stn2 = stn1.compress_simplify(
            output_inds=output_inds, final_resolve=True
        )
        assert not stn2.get_hyperinds()
        p2 = stn2.contract(output_inds=output_inds).data
        assert_allclose(pex, p2)

    def test_istree(self):
        assert Tensor().as_network().istree()
        tn = rand_tensor([2] * 1, ["x"]).as_network()
        assert tn.istree()
        tn |= rand_tensor([2] * 3, ["x", "y", "z"])
        assert tn.istree()
        tn |= rand_tensor([2] * 2, ["y", "z"])
        assert tn.istree()
        tn |= rand_tensor([2] * 2, ["x", "z"])
        assert not tn.istree()

    def test_isconnected(self):
        assert Tensor().as_network().isconnected()
        tn = rand_tensor([2] * 1, ["x"]).as_network()
        assert tn.isconnected()
        tn |= rand_tensor([2] * 3, ["x", "y", "z"])
        assert tn.isconnected()
        tn |= rand_tensor([2] * 2, ["w", "u"])
        assert not tn.isconnected()
        assert not (Tensor() | Tensor()).isconnected()

    def test_get_path_between_tids(self):
        tn = MPS_rand_state(5, 3)
        path = tn.get_path_between_tids(0, 4)
        assert path.tids == (0, 1, 2, 3, 4)
        path = tn.get_path_between_tids(3, 0)
        assert path.tids == (3, 2, 1, 0)

    @pytest.mark.parametrize(
        "contract",
        (
            False,
            True,
            "split",
            "reduce-split",
            "split-gate",
            "swap-split-gate",
        ),
    )
    def test_gate_inds(self, contract):
        tn = qtn.TN_from_edges_rand(
            [("A", "B"), ("B", "C"), ("C", "A")],
            D=3,
            phys_dim=2,
        )
        oix = tn._outer_inds.copy()
        p = tn.to_dense()
        G = qu.rand_matrix(4)
        tn.gate_inds_(
            G, inds=(tn.site_ind("A"), tn.site_ind("C")), contract=contract
        )
        if contract is True:
            assert tn.num_tensors == 2
        elif contract is False:
            assert tn.num_tensors == 4
        elif contract in ("split", "reduce-split"):
            assert tn.num_tensors == 3
        elif contract in ("split-gate", "swap-split-gate"):
            assert tn.num_tensors == 5
            assert tn.max_bond() == 4

        assert tn._outer_inds == oix

        pG = tn.to_dense()
        GIG = qu.pkron(G, [2, 2, 2], [0, 2])
        pGx = GIG @ p
        assert_allclose(pG, pGx)

    def test_gate_inds_with_tn(self):
        k = qtn.MPS_rand_state(6, 3)
        A = qtn.MPO_rand(3, 2)
        k.gate_inds_with_tn_(
            ["k1", "k2", "k4"],
            A,
            ["b0", "b1", "b2"],
            ["k0", "k1", "k2"],
        )
        assert k._outer_inds == oset(f"k{i}" for i in range(6))
        assert k.num_tensors == 6 + 3

    def test_gate_inds_with_tn_missing_inds(self):
        tn = TensorNetwork()
        tn.gate_inds_with_tn_(
            ["k0", "k1"],
            (
                rand_tensor([2, 2, 3], ["k0", "b0", "X"])
                | rand_tensor(
                    [2, 2, 3],
                    ["k1", "b1", "X"],
                )
            ),
            ["b0", "b1"],
            ["k0", "k1"],
        )
        assert tn._outer_inds == oset(["k0", "k1", "b0", "b1"])
        assert tn.num_tensors == 2
        assert tn.num_indices == 5
        assert tn.max_bond() == 3
        tn.gate_inds_with_tn_(
            ["k1", "k2"],
            (
                rand_tensor([2, 2, 3], ["k1", "b1", "X"])
                | rand_tensor(
                    [2, 2, 3],
                    ["k2", "b2", "X"],
                )
            ),
            ["b1", "b2"],
            ["k1", "k2"],
        )
        assert tn._outer_inds == oset(["k0", "k1", "k2", "b0", "b1", "b2"])
        assert tn.num_tensors == 4
        assert tn.num_indices == 9

    def test_gen_paths_loops(self):
        tn = qtn.TN2D_rand(3, 4, 2)
        loops = tuple(tn.gen_paths_loops())
        assert len(loops) == 6
        assert all(len(loop) == 4 for loop in loops)

    def test_select_loop(self):
        tn = qtn.TN2D_rand(2, 3, 2)
        loop6 = next(
            loop
            for loop in tn.gen_paths_loops(max_loop_length=6)
            if len(loop) == 6
        )
        tnl = tn.select_path(loop6)
        assert len(tnl.inner_inds()) == 6

    def test_gen_paths_loops_intersect(self):
        tn = qtn.TN2D_empty(5, 4, 2)
        loops = tuple(tn.gen_paths_loops(8, False))
        na = len(loops)
        assert na == len(frozenset(loops))
        assert na == len(frozenset(map(frozenset, loops)))

        loops = tuple(tn.gen_paths_loops(8, True))
        nb = len(loops)
        assert nb == len(frozenset(loops))
        assert nb == len(frozenset(map(frozenset, loops)))

        assert nb > na

    def test_gen_inds_connected(self):
        tn = qtn.TN2D_rand(3, 4, 2)
        patches = tuple(tn.gen_inds_connected(2))
        assert len(patches) == 34

    def test_tn_isel_rand(self):
        mps = qtn.MPS_rand_state(6, 7)
        ramp = mps.isel({mps.site_ind(i): "r" for i in mps.sites})
        assert ramp.outer_inds() == ()
        # check we haven't selected a computational basis amplitude
        rx = ramp.contract()
        xs = mps.to_dense().ravel()
        assert not any(np.allclose(rx, x) for x in xs)


class TestTensorNetworkSimplifications:
    def test_rank_simplify(self):
        A = rand_tensor([2, 2, 3], "abc", tags="A")
        B = rand_tensor([3, 2], "cd", tags="B")
        C = rand_tensor([2, 2, 2], "def", tags="C")
        tn = A & B & C
        tn_s = tn.rank_simplify()
        assert tn.num_tensors == 3
        assert tn_s.num_tensors == 2
        assert (tn ^ all).almost_equals(tn_s ^ all)
        # check that 'B' was absorbed into 'A' not 'C'
        assert set(tn_s["B"].tags) == {"A", "B"}

    def test_rank_simplify_single_ind(self):
        ts = [rand_tensor([2], "a") for _ in range(100)]
        tn = TensorNetwork(ts)
        assert len(tn.ind_map) == 1
        assert len(tn.tensor_map) == 100
        tn.rank_simplify_()
        assert len(tn.tensor_map) == 1

    def test_diagonal_reduce(self):
        A = rand_tensor([2, 2], "ab", dtype=complex)
        B = Tensor([[3j, 0.0], [0.0, 4j]], "bc")
        C = rand_tensor([2, 2], "ca", dtype=complex)
        tn = A & B & C
        tn_s = tn.diagonal_reduce()
        assert tn.num_indices == 3
        assert tn_s.num_indices == 2
        assert tn ^ all == pytest.approx(tn_s.contract(all, output_inds=[]))

    def test_antidiag_gauge(self):
        A = rand_tensor([2, 2], "ab", dtype=complex)
        B = Tensor([[0.0, 3j], [4j, 0.0]], "bc")
        C = rand_tensor([2, 2], "ca", dtype=complex)
        tn = A & B & C
        assert tn.num_indices == 3
        # can't use diagonal reduction yet
        assert tn.diagonal_reduce().num_indices == 3
        # initial gauge doesn't change indices
        tn_a = tn.antidiag_gauge()
        assert tn_a.num_indices == 3
        # but allows the diagonal reduction
        tn_ad = tn_a.diagonal_reduce()
        assert tn_ad.num_indices == 2
        assert tn ^ all == pytest.approx(tn_ad.contract(all, output_inds=[]))

    def test_column_reduce(self):
        A = rand_tensor([2, 3], "ab")
        A.new_ind("c", size=4, axis=-2)
        B = rand_tensor([4, 5, 6], "cde")
        tn = A & B
        assert tn.num_indices == 5
        tn_s = tn.column_reduce()
        assert tn_s.num_indices == 4
        assert (tn ^ all).almost_equals(tn_s ^ all)


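A minimal standalone sketch of `rank_simplify`, the first of the simplifications tested above, assuming quimb is importable as `quimb.tensor`:

import quimb.tensor as qtn

A = qtn.rand_tensor([2, 2, 3], inds="abc", tags="A")
B = qtn.rand_tensor([3, 2], inds="cd", tags="B")
C = qtn.rand_tensor([2, 2, 2], inds="def", tags="C")
tn = A & B & C

# contract in tensors (here B) whose absorption cannot increase the rank
tn_s = tn.rank_simplify()
assert tn_s.num_tensors < tn.num_tensors
assert (tn ^ all).almost_equals(tn_s ^ all)
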
class TestTensorNetworkAsLinearOperator:
    @pytest.mark.parametrize("optimize", ["auto", "auto-hq"])
    def test_against_dense(self, optimize):
        A, B, C, D = (
            rand_tensor([3, 5, 5], "aef"),
            rand_tensor([3, 5, 5], "beg"),
            rand_tensor([3, 5, 5], "cfh"),
            rand_tensor([3, 5, 5], "dhg"),
        )

        tn = A & B & C & D
        tn_lo = tn.aslinearoperator(("a", "b"), ("c", "d"), optimize=optimize)
        tn_d = tn.to_dense(["a", "b"], ["c", "d"])

        u, s, v = qu.svds(tn_lo, k=5, backend="scipy")
        ud, sd, vd = qu.svds(tn_d, k=5, backend="scipy")

        assert_allclose(s, sd)

        # test matmat
        X = np.random.randn(9, 8) + 1.0j * np.random.randn(9, 8)
        assert_allclose(tn_lo.dot(X), tn_d.dot(X))

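A minimal sketch of using a tensor network lazily as a SciPy linear operator, as the dense-comparison test above does; it assumes quimb and SciPy are importable, and the shapes simply mirror the test:

import numpy as np
import scipy.sparse.linalg as spla
import quimb.tensor as qtn

tn = qtn.TensorNetwork([
    qtn.rand_tensor([3, 5, 5], inds="aef"),
    qtn.rand_tensor([3, 5, 5], inds="beg"),
    qtn.rand_tensor([3, 5, 5], inds="cfh"),
    qtn.rand_tensor([3, 5, 5], inds="dhg"),
])

# lazy 9x9 operator vs. the explicitly contracted dense matrix
tn_lo = tn.aslinearoperator(("a", "b"), ("c", "d"))
tn_d = tn.to_dense(["a", "b"], ["c", "d"])

s_lazy = spla.svds(tn_lo, k=5)[1]
s_dense = spla.svds(tn_d, k=5)[1]
assert np.allclose(np.sort(s_lazy), np.sort(s_dense))
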
    def test_trace_array_function_interface(self):
        tn = qtn.TensorNetwork(
            (
                rand_tensor([3, 5, 5], "aef"),
                rand_tensor([3, 5, 5], "beg"),
                rand_tensor([3, 5, 5], "cfh"),
                rand_tensor([3, 5, 5], "dhg"),
            )
        )
        tn_lo = tn.aslinearoperator(("a", "b"), ("c", "d"))
        tn_d = tn.to_dense(["a", "b"], ["c", "d"])
        assert np.trace(tn_lo) == pytest.approx(np.trace(tn_d))

    @pytest.mark.parametrize("dtype", (float, complex))
    @pytest.mark.parametrize("method", ("isvd", "rsvd"))
    def test_replace_with_svd_using_linear_operator(self, dtype, method):
        k = MPS_rand_state(100, 10, dtype=dtype, cyclic=True)
        b = k.H
        b.expand_bond_dimension(11)
        k.add_tag("_KET")
        b.add_tag("_BRA")
        tn = b & k

        x1 = tn ^ ...

        (ul,) = tn["_KET", "I1"].bonds(tn["_KET", "I2"])
        (ll,) = tn["_BRA", "I1"].bonds(tn["_BRA", "I2"])

        where = [f"I{i}" for i in range(2, 40)]

        tn.replace_with_svd(
            where,
            left_inds=(ul, ll),
            eps=1e-3,
            method=method,
            inplace=True,
            ltags="_U",
            rtags="_V",
        )

        x2 = tn ^ ...

        # check ltags and rtags have gone in
        assert isinstance(tn["_U"], Tensor)
        assert isinstance(tn["_V"], Tensor)

        assert_allclose(x1, x2, rtol=1e-4)

    def test_TNLinearOperator1D(self):
        p = MPS_rand_state(40, 10, dtype=complex)
        pp = p.H & p
        start, stop = 10, 30
        lix = bonds(pp[start - 1], pp[start])
        rix = bonds(pp[stop - 1], pp[stop])

        sec = pp[start:stop]

        A = TNLinearOperator1D(sec, lix, rix, start, stop)
        B = sec.aslinearoperator(lix, rix)

        s1 = spla.svds(A)[1]
        s2 = spla.svds(B)[1]

        assert_allclose(s1, s2)