Trajectree 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122) hide show
  1. trajectree/__init__.py +3 -0
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +3 -3
  5. trajectree/fock_optics/utils.py +6 -6
  6. trajectree/quimb/docs/_pygments/_pygments_dark.py +118 -0
  7. trajectree/quimb/docs/_pygments/_pygments_light.py +118 -0
  8. trajectree/quimb/docs/conf.py +158 -0
  9. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +62 -0
  10. trajectree/quimb/quimb/__init__.py +507 -0
  11. trajectree/quimb/quimb/calc.py +1491 -0
  12. trajectree/quimb/quimb/core.py +2279 -0
  13. trajectree/quimb/quimb/evo.py +712 -0
  14. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  15. trajectree/quimb/quimb/experimental/autojittn.py +129 -0
  16. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +109 -0
  17. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +397 -0
  18. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +316 -0
  19. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +653 -0
  20. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +571 -0
  21. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +775 -0
  22. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +316 -0
  23. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +537 -0
  24. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +194 -0
  25. trajectree/quimb/quimb/experimental/cluster_update.py +286 -0
  26. trajectree/quimb/quimb/experimental/merabuilder.py +865 -0
  27. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +15 -0
  28. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +1631 -0
  29. trajectree/quimb/quimb/experimental/schematic.py +7 -0
  30. trajectree/quimb/quimb/experimental/tn_marginals.py +130 -0
  31. trajectree/quimb/quimb/experimental/tnvmc.py +1483 -0
  32. trajectree/quimb/quimb/gates.py +36 -0
  33. trajectree/quimb/quimb/gen/__init__.py +2 -0
  34. trajectree/quimb/quimb/gen/operators.py +1167 -0
  35. trajectree/quimb/quimb/gen/rand.py +713 -0
  36. trajectree/quimb/quimb/gen/states.py +479 -0
  37. trajectree/quimb/quimb/linalg/__init__.py +6 -0
  38. trajectree/quimb/quimb/linalg/approx_spectral.py +1109 -0
  39. trajectree/quimb/quimb/linalg/autoblock.py +258 -0
  40. trajectree/quimb/quimb/linalg/base_linalg.py +719 -0
  41. trajectree/quimb/quimb/linalg/mpi_launcher.py +397 -0
  42. trajectree/quimb/quimb/linalg/numpy_linalg.py +244 -0
  43. trajectree/quimb/quimb/linalg/rand_linalg.py +514 -0
  44. trajectree/quimb/quimb/linalg/scipy_linalg.py +293 -0
  45. trajectree/quimb/quimb/linalg/slepc_linalg.py +892 -0
  46. trajectree/quimb/quimb/schematic.py +1518 -0
  47. trajectree/quimb/quimb/tensor/__init__.py +401 -0
  48. trajectree/quimb/quimb/tensor/array_ops.py +610 -0
  49. trajectree/quimb/quimb/tensor/circuit.py +4824 -0
  50. trajectree/quimb/quimb/tensor/circuit_gen.py +411 -0
  51. trajectree/quimb/quimb/tensor/contraction.py +336 -0
  52. trajectree/quimb/quimb/tensor/decomp.py +1255 -0
  53. trajectree/quimb/quimb/tensor/drawing.py +1646 -0
  54. trajectree/quimb/quimb/tensor/fitting.py +385 -0
  55. trajectree/quimb/quimb/tensor/geometry.py +583 -0
  56. trajectree/quimb/quimb/tensor/interface.py +114 -0
  57. trajectree/quimb/quimb/tensor/networking.py +1058 -0
  58. trajectree/quimb/quimb/tensor/optimize.py +1818 -0
  59. trajectree/quimb/quimb/tensor/tensor_1d.py +4778 -0
  60. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +1854 -0
  61. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +662 -0
  62. trajectree/quimb/quimb/tensor/tensor_2d.py +5954 -0
  63. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +96 -0
  64. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +1230 -0
  65. trajectree/quimb/quimb/tensor/tensor_3d.py +2869 -0
  66. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +46 -0
  67. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +60 -0
  68. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +3237 -0
  69. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +565 -0
  70. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +1138 -0
  71. trajectree/quimb/quimb/tensor/tensor_builder.py +5411 -0
  72. trajectree/quimb/quimb/tensor/tensor_core.py +11179 -0
  73. trajectree/quimb/quimb/tensor/tensor_dmrg.py +1472 -0
  74. trajectree/quimb/quimb/tensor/tensor_mera.py +204 -0
  75. trajectree/quimb/quimb/utils.py +892 -0
  76. trajectree/quimb/tests/__init__.py +0 -0
  77. trajectree/quimb/tests/test_accel.py +501 -0
  78. trajectree/quimb/tests/test_calc.py +788 -0
  79. trajectree/quimb/tests/test_core.py +847 -0
  80. trajectree/quimb/tests/test_evo.py +565 -0
  81. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  82. trajectree/quimb/tests/test_gen/test_operators.py +361 -0
  83. trajectree/quimb/tests/test_gen/test_rand.py +296 -0
  84. trajectree/quimb/tests/test_gen/test_states.py +261 -0
  85. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  86. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +368 -0
  87. trajectree/quimb/tests/test_linalg/test_base_linalg.py +351 -0
  88. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +127 -0
  89. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +84 -0
  90. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +134 -0
  91. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +283 -0
  92. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  93. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  94. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +39 -0
  95. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +67 -0
  96. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +64 -0
  97. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +51 -0
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +142 -0
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +101 -0
  100. trajectree/quimb/tests/test_tensor/test_circuit.py +816 -0
  101. trajectree/quimb/tests/test_tensor/test_contract.py +67 -0
  102. trajectree/quimb/tests/test_tensor/test_decomp.py +40 -0
  103. trajectree/quimb/tests/test_tensor/test_mera.py +52 -0
  104. trajectree/quimb/tests/test_tensor/test_optimizers.py +488 -0
  105. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +1171 -0
  106. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +606 -0
  107. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +144 -0
  108. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +123 -0
  109. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +226 -0
  110. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +441 -0
  111. trajectree/quimb/tests/test_tensor/test_tensor_core.py +2066 -0
  112. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +388 -0
  113. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +63 -0
  114. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +270 -0
  115. trajectree/quimb/tests/test_utils.py +85 -0
  116. trajectree/trajectory.py +2 -2
  117. {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/METADATA +2 -2
  118. trajectree-0.0.1.dist-info/RECORD +126 -0
  119. trajectree-0.0.0.dist-info/RECORD +0 -16
  120. {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/WHEEL +0 -0
  121. {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/licenses/LICENSE +0 -0
  122. {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,67 @@
1
+ import pytest
2
+ import numpy as np
3
+
4
+ import quimb.tensor as qtn
5
+ from quimb.tensor.contraction import _CONTRACT_BACKEND, _TENSOR_LINOP_BACKEND
6
+
7
+
8
class TestContractOpts:
    """Checks that the global contraction strategy/backend context managers
    override the setting inside the ``with`` block and restore it after."""

    def test_contract_strategy(self):
        assert qtn.get_contract_strategy() == "greedy"
        with qtn.contract_strategy("auto"):
            # temporarily overridden inside the context
            assert qtn.get_contract_strategy() == "auto"
        # restored on exit
        assert qtn.get_contract_strategy() == "greedy"

    def test_contract_backend(self):
        default = _CONTRACT_BACKEND
        assert qtn.get_contract_backend() == default
        with qtn.contract_backend("cupy"):
            assert qtn.get_contract_backend() == "cupy"
        assert qtn.get_contract_backend() == default

    def test_tensor_linop_backend(self):
        default = _TENSOR_LINOP_BACKEND
        assert qtn.get_tensor_linop_backend() == default
        with qtn.tensor_linop_backend("cupy"):
            assert qtn.get_tensor_linop_backend() == "cupy"
        assert qtn.get_tensor_linop_backend() == default

    def test_contract_cache(self):
        import cotengra as ctg

        calls = {"num_calls": 0}

        def my_custom_opt(inputs, output, size_dict, memory_limit=None):
            # count how many times the path optimizer actually runs
            calls["num_calls"] += 1
            return [(0, 1)] * (len(inputs) - 1)

        ctg.register_preset("quimb_test_opt", my_custom_opt)

        tn = qtn.MPS_rand_state(4, 3) & qtn.MPS_rand_state(4, 3)
        expr_a = tn.contract(all, optimize="quimb_test_opt", get="expression")
        expr_b = tn.contract(all, optimize="quimb_test_opt", get="expression")
        # identical requests should hit the cache: same object, one pathfind
        assert expr_a is expr_b
        assert calls["num_calls"] == 1
45
+
46
+
47
@pytest.mark.parametrize("around", ["I3,3", "I0,0", "I1,2"])
@pytest.mark.parametrize("equalize_norms", [False, True])
@pytest.mark.parametrize("gauge_boundary_only", [False, True])
def test_contract_approx_with_gauges(
    around, equalize_norms, gauge_boundary_only
):
    """Approximate gauged contraction of a 7x7 TN should match the exact
    contraction value to ~1% relative accuracy."""
    rng = np.random.default_rng(42)

    def fill(shape):
        return rng.uniform(size=shape, low=-0.5)

    tn = qtn.TN2D_from_fill_fn(fill, 7, 7, 4)

    # exact reference value
    Zex = tn ^ ...

    # bond-truncated, gauge-stabilized contraction around a chosen site
    Z = tn.contract_around(
        around,
        max_bond=8,
        gauges=True,
        gauge_boundary_only=gauge_boundary_only,
        tree_gauge_distance=2,
        equalize_norms=equalize_norms,
        progbar=True,
    )
    assert Z == pytest.approx(Zex, rel=1e-2)
@@ -0,0 +1,40 @@
1
+ import pytest
2
+ import numpy as np
3
+ import quimb as qu
4
+
5
+
6
def test_sgn_convention():
    """``sgn`` maps zero to +1 (not 0) and any nonzero z to a unit-modulus
    phase z/|z|."""
    from quimb.tensor.decomp import sgn

    # positive reals -> +1, negative reals -> -1
    for pos in (1, 2.0):
        assert sgn(pos) == 1
    for neg in (-1, -2.0):
        assert sgn(neg) == -1

    # zeros of every flavour are sent to +1
    for zero in (0, 0.0, 0.0 + 0.0j):
        assert sgn(zero) == 1

    # a genuinely complex value is neither +1 nor -1, but has modulus 1
    phase = sgn(1.0 + 2.0j)
    assert phase != 1
    assert phase != -1
    assert abs(phase) == pytest.approx(1)
19
+
20
+
21
@pytest.mark.parametrize(
    "dtype", ["float64", "float32", "complex128", "complex64"]
)
def test_qr_stabilized_sign_bug(dtype):
    """``qr_stabilized`` must reconstruct X = Q @ R even when R has an
    exactly-zero diagonal (the historical sign-fixing bug)."""
    from quimb.tensor.decomp import qr_stabilized

    # looser tolerance for single precision dtypes
    tol = 1e-12 if dtype in ("float64", "complex128") else 1e-6

    for _ in range(10):
        Q = qu.rand_uni(4, dtype=dtype)
        R = qu.rand_matrix(4, dtype=dtype)

        # zero the diagonal and below -> strictly upper triangular R
        ii, jj = np.indices(R.shape)
        R[ii >= jj] = 0.0

        X = Q @ R
        Q2, _, R2 = qr_stabilized(X)

        # the decomposition must still reproduce X
        assert abs(np.linalg.norm((Q2 @ R2) - X)) < tol
@@ -0,0 +1,52 @@
1
+ import pytest
2
+ import quimb as qu
3
+ import quimb.tensor as qt
4
+
5
+
6
class TestMERA:
    """Tests for construction, contraction and isometrization of MERA."""

    @pytest.mark.parametrize("dtype", [float, complex])
    def test_construct_random(self, dtype):
        # BUG FIX: pass the parametrized dtype through to ``MERA.rand`` --
        # previously it was ignored, so the complex case silently re-ran
        # the float default (cf. ``MERA.rand(..., dtype=...)`` usage in
        # the optimizer tests).
        mera = qt.MERA.rand(16, dtype=dtype)

        # test outer inds
        assert set(mera.outer_inds()) == {f"k{i}" for i in range(16)}

        # test normalized
        assert (mera.H & mera) ^ all == pytest.approx(1.0)

        # test auto contract all
        assert mera.H @ mera == pytest.approx(1.0)

        # test dense conversion
        md = mera.to_qarray()
        assert md.H @ md == pytest.approx(1.0)

    def test_1d_vector_methods(self):
        X = qu.spin_operator("X", sparse=True)

        mera = qt.MERA.rand(16)
        meraX = mera.gate(X.toarray(), 7)
        # gating must return a new network, not mutate in place
        assert mera is not meraX
        x1 = mera.H @ meraX

        # compare against the dense computation
        md = mera.to_qarray()
        mdX = qu.ikron(X, [2] * 16, 7) @ md
        x2 = md.H @ mdX
        assert x1 == pytest.approx(x2)

        # check 'outside lightcone' overlap unaffected by the gate
        assert mera.select(3).H @ meraX.select(3) == pytest.approx(1.0)

        # check only the 'lightcone' is needed to compute the local value
        assert mera.select(7).H @ meraX.select(7) == pytest.approx(x2)

    @pytest.mark.parametrize("method", ["qr", "exp", "cayley", "mgs", "svd"])
    def test_isometrize(self, method):
        mera = qt.MERA.rand(16, dangle=True)
        assert mera.H @ mera == pytest.approx(2.0)

        # scramble the tensors so the network is no longer isometric
        for t in mera:
            t.modify(data=qu.randn(t.shape))
        assert mera.H @ mera != pytest.approx(2.0)

        # re-isometrizing (any method) should restore the norm
        mera.isometrize_(method=method)
        assert mera.H @ mera == pytest.approx(2.0)
@@ -0,0 +1,488 @@
1
+ import functools
2
+ import importlib
3
+
4
+ import pytest
5
+ import numpy as np
6
+ from numpy.testing import assert_allclose
7
+ from autoray import real
8
+
9
+ import quimb as qu
10
+ import quimb.tensor as qtn
11
+ from quimb.tensor.optimize import Vectorizer, parse_network_to_backend
12
+
13
+
14
# --- optional autodiff backend detection -----------------------------------


def _installed(module_name):
    """Return True if *module_name* is importable in this environment."""
    return importlib.util.find_spec(module_name) is not None


found_torch = _installed("torch")
found_autograd = _installed("autograd")
found_jax = _installed("jax")
found_tensorflow = _installed("tensorflow")

if found_tensorflow:
    # the optimizer relies on tensorflow's numpy-compatible behaviour
    import tensorflow.experimental.numpy as tnp

    tnp.experimental_enable_numpy_behavior()

# pytest params that are automatically skipped when the backend is missing
jax_case = pytest.param(
    "jax", marks=pytest.mark.skipif(not found_jax, reason="jax not installed")
)
autograd_case = pytest.param(
    "autograd",
    marks=pytest.mark.skipif(
        not found_autograd, reason="autograd not installed"
    ),
)
tensorflow_case = pytest.param(
    "tensorflow",
    marks=pytest.mark.skipif(
        not found_tensorflow, reason="tensorflow not installed"
    ),
)
pytorch_case = pytest.param(
    "torch",
    marks=pytest.mark.skipif(not found_torch, reason="pytorch not installed"),
)
43
+
44
+
45
@pytest.fixture
def tagged_qaoa_tn():
    """Build an 8-qubit, depth-4 QAOA circuit TN whose RZZ and RX gates are
    tagged per round (``p0``, ``p1``, ...), so the tags can be used as
    ``shared_tags`` in ``TNOptimizer``.

    Returns ``(n, depth, tagged_tn)``.
    """
    n = 8
    depth = 4
    terms = [(i, (i + 1) % n) for i in range(n)]
    gammas = qu.randn(depth)
    betas = qu.randn(depth)

    # keep gates uncontracted so every gate stays an individually
    # taggable tensor
    circuit_opts = {"gate_opts": {"contract": False}}
    circ = qtn.Circuit(n, **circuit_opts)

    # hadamard layer -> |+...+> starting state
    for q in range(n):
        circ.apply_gate("H", q, gate_round=0)

    for d in range(depth):
        # problem layer: one parametrized RZZ per edge
        for i, j in terms:
            circ.apply_gate(
                "RZZ", -gammas[d], i, j, gate_round=d, parametrize=True
            )
        # mixer layer: one parametrized RX per qubit
        for q in range(n):
            circ.apply_gate(
                "RX", betas[d] * 2, q, gate_round=d, parametrize=True
            )

    # add one shared tag per (round, gate-type) layer
    tn_tagged = circ.psi.copy()
    for d in range(depth):
        tn_tagged.select(["RZZ", f"ROUND_{d}"]).add_tag(f"p{2 * d}")
        tn_tagged.select(["RX", f"ROUND_{d}"]).add_tag(f"p{2 * d + 1}")

    return n, depth, tn_tagged
84
+
85
+
86
@pytest.fixture
def heis_pbc():
    """Periodic Heisenberg chain setup.

    Returns ``(psi0, H, norm_fn, loss_fn, exact_ground_energy)``.
    """
    L = 10
    chi = 8
    dtype = "float32"
    psi0 = qtn.MPS_rand_state(L, chi, cyclic=True, seed=42).astype(dtype)
    H = qtn.MPO_ham_heis(L, cyclic=True).astype(dtype)

    def norm_fn(psi):
        # normalize by the 2-norm (real state, so psi & psi suffices)
        factor = (psi & psi).contract(all, optimize="auto-hq")
        return psi / factor**0.5

    def loss_fn(psi, H):
        # energy expectation <psi|H|psi>
        k, H, b = qtn.tensor_network_align(psi, H, psi)
        return (k & H & b).contract(all, optimize="auto-hq")

    # exact reference from dense diagonalization
    en_ex = qu.groundenergy(qu.ham_heis(L, cyclic=True, sparse=True))

    return psi0, H, norm_fn, loss_fn, en_ex
106
+
107
+
108
@pytest.fixture
def ham_mbl_pbc_complex():
    """Periodic MBL hamiltonian setup with complex dtype.

    Returns ``(psi0, H, norm_fn, loss_fn, exact_ground_energy)``.
    """
    L = 10
    chi = 8
    dtype = "complex64"
    psi0 = qtn.MPS_rand_state(L, chi, cyclic=True, seed=42).astype(dtype)

    ham_opts = {"cyclic": True, "dh": 0.7, "dh_dim": 3, "seed": 42}
    H = qtn.MPO_ham_mbl(L, **ham_opts).astype(dtype)

    def norm_fn(psi):
        # complex state -> conjugate network needed for the norm
        factor = (psi.H & psi).contract(all, optimize="auto-hq")
        return psi * factor**-0.5

    def loss_fn(psi, H):
        k, H, b = qtn.tensor_network_align(psi, H, psi.H)
        energy = (k & H & b).contract(all, optimize="auto-hq")
        # the energy is real up to numerical noise
        return real(energy)

    # exact reference from dense diagonalization
    en_ex = qu.groundenergy(qu.ham_mbl(L, sparse=True, **ham_opts))

    return psi0, H, norm_fn, loss_fn, en_ex
130
+
131
+
132
def test_vectorizer():
    """``Vectorizer`` should round-trip both the packed values and a packed
    gradient through its flat representation."""
    shapes = [(2, 3), (4, 5), (6, 7, 8)]
    dtypes = ["complex64", "float32", "complex64"]
    arrays = [qu.randn(s, dtype=d) for s, d in zip(shapes, dtypes)]

    v = Vectorizer(arrays)

    grads = [qu.randn(s, dtype=d) for s, d in zip(shapes, dtypes)]
    v.pack(grads, "grad")

    # values round-trip through the flat vector
    for original, unpacked in zip(arrays, v.unpack(v.vector)):
        assert_allclose(original, unpacked)

    # gradients round-trip too
    for original, unpacked in zip(grads, v.unpack(v.grad)):
        assert_allclose(original, unpacked)
149
+
150
+
151
def rand_array(rng):
    """Return a random array with 1-5 dims, dim sizes 2-5, and a random
    float/complex dtype, drawn from *rng*.
    """
    ndim = rng.integers(1, 6)
    shape = rng.integers(2, 6, size=ndim)
    dtype = rng.choice(["float32", "float64", "complex64", "complex128"])
    # BUG FIX: ``rng.normal(shape)`` passed the shape array as the *loc*
    # (mean) argument, yielding entries centred on the dimension sizes
    # (2-5) rather than standard normals -- use the ``size`` keyword.
    x = rng.normal(size=shape).astype(dtype)
    if "complex" in dtype:
        x += 1j * rng.normal(size=shape).astype(dtype)
    return x


def random_array_pytree(rng, max_depth=3):
    """Return a random nested pytree (lists/tuples/dicts) of random arrays,
    at most *max_depth* levels deep; leaves come from :func:`rand_array`.
    """

    def _recurse(d=0):
        if d >= max_depth:
            # force a leaf once the depth budget is spent
            return rand_array(rng)
        t = rng.choice(["array", "list", "tuple", "dict"])
        if t == "array":
            return rand_array(rng)
        elif t == "list":
            return [_recurse(d + 1) for _ in range(rng.integers(2, 6))]
        elif t == "tuple":
            return tuple(_recurse(d + 1) for _ in range(rng.integers(2, 6)))
        elif t == "dict":
            # label entries 'a', 'b', ...
            cs = (chr(i) for i in range(ord("a"), ord("z") + 1))
            return {
                next(cs): _recurse(d + 1) for _ in range(rng.integers(2, 6))
            }

    return _recurse()
179
+
180
+
181
def test_vectorizer_pytree():
    """``Vectorizer`` should round-trip an arbitrary pytree as a fresh object
    with identical structure and contents."""
    tree = random_array_pytree(np.random.default_rng(666))
    v = Vectorizer(tree)
    new_tree = v.unpack()
    # a new object ...
    assert new_tree is not tree
    # ... but structurally and numerically identical
    assert str(new_tree) == str(tree)
187
+
188
+
189
@pytest.mark.parametrize(
    "backend", [jax_case, autograd_case, tensorflow_case, pytorch_case]
)
@pytest.mark.parametrize("method", ["simple", "basin"])
def test_optimize_pbc_heis(heis_pbc, backend, method):
    """Optimize a cyclic Heisenberg MPS to within 1% of the exact energy."""
    psi0, H, norm_fn, loss_fn, en_ex = heis_pbc
    tnopt = qtn.TNOptimizer(
        psi0,
        loss_fn,
        norm_fn,
        loss_constants={"H": H},
        autodiff_backend=backend,
    )
    # either one long run or several basin-hopping restarts
    if method == "simple":
        psi_opt = tnopt.optimize(100)
    elif method == "basin":
        psi_opt = tnopt.optimize_basinhopping(25, 4)
    assert loss_fn(psi_opt, H) == pytest.approx(en_ex, rel=1e-2)
207
+
208
+
209
@pytest.mark.parametrize("backend", [jax_case, autograd_case, tensorflow_case])
@pytest.mark.parametrize("method", ["simple", "basin"])
def test_optimize_ham_mbl_complex(ham_mbl_pbc_complex, backend, method):
    """Optimize a complex cyclic MBL MPS to within 1% of the exact energy."""
    psi0, H, norm_fn, loss_fn, en_ex = ham_mbl_pbc_complex
    tnopt = qtn.TNOptimizer(
        psi0,
        loss_fn,
        norm_fn,
        loss_constants={"H": H},
        autodiff_backend=backend,
    )
    # either one long run or several basin-hopping restarts
    if method == "simple":
        psi_opt = tnopt.optimize(100)
    elif method == "basin":
        psi_opt = tnopt.optimize_basinhopping(25, 4)
    assert loss_fn(psi_opt, H) == pytest.approx(en_ex, rel=1e-2)
225
+
226
+
227
@pytest.mark.parametrize(
    "backend", [jax_case, autograd_case, pytorch_case, tensorflow_case]
)
def test_every_parametrized_gate(backend):
    """Every parametrized gate type should be differentiable end-to-end:
    one optimization step must strictly decrease the loss."""
    circ = qtn.Circuit(2)

    # (circuit method, number of parameters, qubit arguments) -- one entry
    # per parametrized gate type, applied in this exact order
    gate_specs = [
        ("rx", 1, (0,)),
        ("ry", 1, (1,)),
        ("rz", 1, (0,)),
        ("rxx", 1, (0, 1)),
        ("ryy", 1, (0, 1)),
        ("rzz", 1, (1, 0)),
        ("crx", 1, (1, 0)),
        ("cry", 1, (0, 1)),
        ("crz", 1, (1, 0)),
        ("u1", 1, (1,)),
        ("u2", 2, (0,)),
        ("u3", 3, (1,)),
        ("cu1", 1, (0, 1)),
        ("cu2", 2, (1, 0)),
        ("cu3", 3, (1, 0)),
        ("fsim", 2, (0, 1)),
        ("fsimg", 5, (1, 0)),
        ("givens", 1, (0, 1)),
        ("givens2", 2, (0, 1)),
        ("su4", 15, (0, 1)),
    ]
    for method_name, nparams, qubits in gate_specs:
        getattr(circ, method_name)(
            *qu.randn(nparams), *qubits, parametrize=True, tags=["OPTIMIZE"]
        )
    psi = circ.psi

    target = qtn.Dense1D(qu.rand_haar_state(4))

    def loss(psi, target):
        # negative squared overlap with the target state
        return -(real(psi.H @ target) ** 2)

    f0 = loss(psi, target)
    tnopt = qtn.TNOptimizer(
        psi,
        loss,
        tags="OPTIMIZE",
        loss_constants={"target": target},
        autodiff_backend=backend,
        device="cpu",
    )
    tnopt.optimize(1)
    assert tnopt.loss < f0
270
+
271
+
272
@pytest.mark.parametrize("backend", [jax_case, autograd_case, tensorflow_case])
def test_parametrized_circuit(backend):
    """A brickwork U3 ansatz should converge onto the exact ground state,
    stopping as soon as ``loss_target`` is crossed."""
    H = qu.ham_mbl(4, dh=3.0, dh_dim=3)
    gs = qu.groundstate(H)
    T_gs = qtn.Dense1D(gs)

    def loss(psi, target):
        # negative overlap magnitude with the target
        f = psi.H & target
        f.rank_simplify_()
        return -abs(f ^ all)

    circ = qtn.circ_ansatz_1D_brickwork(4, depth=4)
    psi0 = circ.psi
    tnopt = qtn.TNOptimizer(
        psi0,
        loss,
        tags="U3",
        loss_constants=dict(target=T_gs),
        autodiff_backend=backend,
        loss_target=-0.99,
    )
    psi_opt = tnopt.optimize(20)
    # optimization stops exactly once the loss target is crossed
    assert sum(lval < -0.99 for lval in tnopt.losses) == 1
    assert qu.fidelity(psi_opt.to_dense(), gs) > 0.99
296
+
297
+
298
def mera_norm_fn(mera):
    """Project *mera* back onto the isometric manifold (Cayley method)."""
    return mera.isometrize(method="cayley")
300
+
301
+
302
def mera_local_expectation(mera, terms, where):
    """Compute ``<mera| terms[where] |mera>`` using only the causal-cone
    tensors of the sites in *where*."""
    site_tags = [mera.site_tag(coo) for coo in where]
    # restrict to the lightcone of the involved sites
    cone = mera.select(site_tags, "any")
    cone_gated = cone.gate(terms[where], where)
    expectation_tn = cone_gated & cone.H
    return expectation_tn.contract(all, optimize="auto-hq")
308
+
309
+
310
@pytest.mark.parametrize(
    "backend", [autograd_case, jax_case, tensorflow_case, pytorch_case]
)
@pytest.mark.parametrize("executor", [None, "threads"])
def test_multiloss(backend, executor):
    """Optimize a MERA against a list of per-term loss functions, optionally
    evaluating the terms concurrently in a thread pool."""
    if executor == "threads":
        from concurrent.futures import ThreadPoolExecutor

        executor = ThreadPoolExecutor(2)

    L = 8
    D = 3
    dtype = "float32"

    mera = qtn.MERA.rand(L, max_bond=D, dtype=dtype)

    H2 = qu.ham_heis(2).real.astype(dtype)
    terms = {(i, (i + 1) % L): H2 for i in range(L)}

    # one loss function per hamiltonian term; the total loss is their sum
    loss_fns = [
        functools.partial(mera_local_expectation, where=where)
        for where in terms
    ]

    tnopt = qtn.TNOptimizer(
        mera,
        loss_fn=loss_fns,
        norm_fn=mera_norm_fn,
        loss_constants={"terms": terms},
        autodiff_backend=backend,
        executor=executor,
        device="cpu",
    )

    tnopt.optimize(10)
    # exact ground energy is about -3.651; just require decent progress
    assert tnopt.loss < -2.5

    if executor is not None:
        executor.shutdown()
350
+
351
+
352
def test_parse_network_to_backend_shared_tags(tagged_qaoa_tn):
    """With ``shared_tags``, each tagged layer maps to a single variable
    shared by all n tensors of that layer."""
    n, depth, psi0 = tagged_qaoa_tn

    def to_constant(x):
        return np.asarray(x)

    tags = [f"p{i}" for i in range(2 * depth)]
    tn_tagged, variables = parse_network_to_backend(
        psi0,
        tags=tags,
        shared_tags=tags,
        to_constant=to_constant,
    )
    # one shared variable per layer tag
    assert len(variables) == 2 * depth
    # each variable tag appears on all n tensors of its layer
    for i in range(len(tags)):
        assert len(tn_tagged.select(f"__VARIABLE{i}__").tensors) == n
371
+
372
+
373
def test_parse_network_to_backend_individual_tags(tagged_qaoa_tn):
    """Without ``shared_tags``, every tagged tensor becomes its own
    independent variable."""
    n, depth, psi0 = tagged_qaoa_tn

    def to_constant(x):
        return np.asarray(x)

    tags = [f"p{i}" for i in range(2 * depth)]
    tn_tagged, variables = parse_network_to_backend(
        psi0, tags=tags, to_constant=to_constant
    )
    # one variable per tensor: n tensors in each of the 2*depth layers
    assert len(variables) == 2 * depth * n
    # each variable tag should be carried by exactly one tensor
    for i in range(len(tags)):
        assert len(tn_tagged.select_tensors(f"__VARIABLE{i}__")) == 1
389
+
390
+
391
def test_parse_network_to_backend_constant_tags(tagged_qaoa_tn):
    """Tensors carrying a ``constant_tags`` entry are excluded from the
    optimization variables."""
    n, depth, psi0 = tagged_qaoa_tn

    def to_constant(x):
        return np.asarray(x)

    # freeze the initial state, hadamards, and the first QAOA layer
    constant_tags = ["PSI0", "H", "p0", "p1"]
    tn_tagged, variables = parse_network_to_backend(
        psi0, constant_tags=constant_tags, to_constant=to_constant
    )

    # only depth-1 layers remain variable, each with n individual tensors
    assert len(variables) == 2 * (depth - 1) * n
    # each variable tag should be carried by exactly one tensor
    for i in range(len(variables)):
        assert len(tn_tagged.select(f"__VARIABLE{i}__").tensors) == 1
409
+
410
+
411
@pytest.mark.parametrize("backend", [jax_case, autograd_case, tensorflow_case])
def test_shared_tags(tagged_qaoa_tn, backend):
    """Optimizing with ``shared_tags`` ties every QAOA layer to one shared
    parameter set, shrinking the search space to ``2 * depth`` variables."""
    n, depth, psi0 = tagged_qaoa_tn

    # transverse-field ising model whose ground state the QAOA targets
    H = qu.ham_heis(
        n,
        j=(0.0, 0.0, -1.0),
        b=(1.0, 0.0, 0.0),
        cyclic=True,
    )
    gs = qu.groundstate(H)
    T_gs = qtn.Dense1D(gs).astype(complex)  # tensorflow needs all same dtype

    def loss(psi, target):
        # negative overlap magnitude with the target
        f = psi.H & target
        f.rank_simplify_()
        return -abs(f ^ all)

    tags = [f"p{i}" for i in range(2 * depth)]
    tnopt = qtn.TNOptimizer(
        psi0,
        loss_fn=loss,
        tags=tags,
        shared_tags=tags,
        loss_constants={"target": T_gs},
        autodiff_backend=backend,
    )

    # run optimisation and test output
    psi_opt = tnopt.optimize_basinhopping(n=10, nhop=5)
    assert qu.fidelity(psi_opt.to_dense(), gs) > 0.99

    # the optimisation space is one parameter set per layer
    assert tnopt.res.x.size == 2 * depth

    # all tensors sharing a tag must have ended up with identical params
    for tag in tags:
        layer_params = [t.get_params() for t in psi_opt.select_tensors(tag)]
        for other in layer_params[1:]:
            assert_allclose(layer_params[0], other)
456
+
457
+
458
@pytest.mark.parametrize(
    "backend", [jax_case, autograd_case, tensorflow_case, pytorch_case]
)
@pytest.mark.parametrize("simplify", ["ADCRS", "R"])
def test_optimize_circuit_directly(backend, simplify):
    """A ``Circuit`` object itself can be handed straight to ``TNOptimizer``
    and is returned as a new, numpy-backed circuit."""
    if backend == "jax" and simplify == "ADCRS":
        pytest.skip("JAX does not support dynamic simplification.")

    circ = qtn.Circuit(2)
    rng = np.random.default_rng(42)
    # two single-qubit layers around an entangling CNOT
    circ.u3(*rng.uniform(high=2 * np.pi, size=3), 0, parametrize=True)
    circ.u3(*rng.uniform(high=2 * np.pi, size=3), 1, parametrize=True)
    circ.cnot(0, 1)
    circ.u3(*rng.uniform(high=2 * np.pi, size=3), 0, parametrize=True)
    circ.u3(*rng.uniform(high=2 * np.pi, size=3), 1, parametrize=True)

    H = qu.ham_heis(2).astype("complex128")

    def loss(circ, H):
        return real(
            circ.local_expectation(H, (0, 1), simplify_sequence=simplify)
        )

    # initial energy sits above the threshold ...
    assert loss(circ, H) > -0.74
    tnopt = qtn.TNOptimizer(
        circ, loss, loss_constants=dict(H=H), autodiff_backend=backend
    )
    circ_opt = tnopt.optimize(10)
    # ... optimisation returns a *new* circuit below it
    assert circ_opt is not circ
    assert loss(circ_opt, H) < -0.74
    # and all tensors have been converted back to numpy
    assert {t.backend for t in circ_opt.psi} == {"numpy"}