tensorcircuit-nightly 1.2.0.dev20250326__py3-none-any.whl → 1.4.0.dev20251128__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tensorcircuit/__init__.py +5 -1
- tensorcircuit/abstractcircuit.py +4 -0
- tensorcircuit/analogcircuit.py +413 -0
- tensorcircuit/applications/layers.py +1 -1
- tensorcircuit/applications/van.py +1 -1
- tensorcircuit/backends/abstract_backend.py +312 -5
- tensorcircuit/backends/cupy_backend.py +3 -1
- tensorcircuit/backends/jax_backend.py +100 -4
- tensorcircuit/backends/jax_ops.py +108 -0
- tensorcircuit/backends/numpy_backend.py +49 -3
- tensorcircuit/backends/pytorch_backend.py +92 -3
- tensorcircuit/backends/tensorflow_backend.py +102 -3
- tensorcircuit/basecircuit.py +157 -98
- tensorcircuit/circuit.py +115 -57
- tensorcircuit/cloud/local.py +1 -1
- tensorcircuit/cloud/quafu_provider.py +1 -1
- tensorcircuit/cloud/tencent.py +1 -1
- tensorcircuit/compiler/simple_compiler.py +2 -2
- tensorcircuit/cons.py +105 -23
- tensorcircuit/densitymatrix.py +16 -11
- tensorcircuit/experimental.py +733 -153
- tensorcircuit/fgs.py +254 -73
- tensorcircuit/gates.py +66 -22
- tensorcircuit/interfaces/jax.py +5 -3
- tensorcircuit/interfaces/tensortrans.py +6 -2
- tensorcircuit/interfaces/torch.py +14 -4
- tensorcircuit/keras.py +3 -3
- tensorcircuit/mpscircuit.py +154 -65
- tensorcircuit/quantum.py +698 -134
- tensorcircuit/quditcircuit.py +733 -0
- tensorcircuit/quditgates.py +618 -0
- tensorcircuit/results/counts.py +131 -18
- tensorcircuit/results/readout_mitigation.py +4 -1
- tensorcircuit/shadows.py +1 -1
- tensorcircuit/simplify.py +3 -1
- tensorcircuit/stabilizercircuit.py +29 -17
- tensorcircuit/templates/__init__.py +2 -0
- tensorcircuit/templates/blocks.py +2 -2
- tensorcircuit/templates/hamiltonians.py +174 -0
- tensorcircuit/templates/lattice.py +1789 -0
- tensorcircuit/timeevol.py +896 -0
- tensorcircuit/translation.py +10 -3
- tensorcircuit/utils.py +7 -0
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/METADATA +66 -29
- tensorcircuit_nightly-1.4.0.dev20251128.dist-info/RECORD +96 -0
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/WHEEL +1 -1
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/top_level.txt +0 -1
- tensorcircuit_nightly-1.2.0.dev20250326.dist-info/RECORD +0 -118
- tests/__init__.py +0 -0
- tests/conftest.py +0 -67
- tests/test_backends.py +0 -1035
- tests/test_calibrating.py +0 -149
- tests/test_channels.py +0 -409
- tests/test_circuit.py +0 -1699
- tests/test_cloud.py +0 -219
- tests/test_compiler.py +0 -147
- tests/test_dmcircuit.py +0 -555
- tests/test_ensemble.py +0 -72
- tests/test_fgs.py +0 -310
- tests/test_gates.py +0 -156
- tests/test_interfaces.py +0 -562
- tests/test_keras.py +0 -160
- tests/test_miscs.py +0 -282
- tests/test_mpscircuit.py +0 -341
- tests/test_noisemodel.py +0 -156
- tests/test_qaoa.py +0 -86
- tests/test_qem.py +0 -152
- tests/test_quantum.py +0 -549
- tests/test_quantum_attr.py +0 -42
- tests/test_results.py +0 -380
- tests/test_shadows.py +0 -160
- tests/test_simplify.py +0 -46
- tests/test_stabilizer.py +0 -217
- tests/test_templates.py +0 -218
- tests/test_torchnn.py +0 -99
- tests/test_van.py +0 -102
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/licenses/LICENSE +0 -0
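For orientation, a minimal smoke test of the new wheel is sketched below. It is hypothetical and only exercises the long-standing circuit API (the same calls used in the removed tests later in this diff) and imports the modules that are new in this release according to the file list above; the diff does not show the public APIs inside those modules, so nothing beyond importability is assumed.

# Hypothetical smoke test against tensorcircuit-nightly 1.4.0.dev20251128.
# Module names are taken from the file list above; their internal APIs are
# not shown in this diff, so only imports and a basic circuit are checked.
import importlib

import tensorcircuit as tc

# Basic circuit API (also exercised by the removed test suite):
c = tc.Circuit(2)
c.h(0)
c.cnot(0, 1)
print(c.expectation_ps(z=[0, 1]))  # Bell state: <Z0 Z1> should be ~1.0

# Modules new in this release, per the file list:
for mod in [
    "analogcircuit",
    "quditcircuit",
    "quditgates",
    "timeevol",
    "templates.hamiltonians",
    "templates.lattice",
]:
    importlib.import_module(f"tensorcircuit.{mod}")
    print(f"tensorcircuit.{mod} imported OK")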
tests/test_stabilizer.py
DELETED
@@ -1,217 +0,0 @@
import os
import sys

import numpy as np
import pytest

thisfile = os.path.abspath(__file__)
modulepath = os.path.dirname(os.path.dirname(thisfile))

sys.path.insert(0, modulepath)
import tensorcircuit as tc

# Skip all tests if stim is not installed
stim = pytest.importorskip("stim")


def test_basic_gates():
    c = tc.StabilizerCircuit(2)
    c.h(0)
    c.cnot(0, 1)
    results = c.measure(0, 1)
    print(results)
    results = c.measure(0, 1)
    print(results)
    assert len(results) == 2
    assert all(r in [0, 1] for r in results)


def test_bell_state():
    c = tc.StabilizerCircuit(2)
    c.H(1)
    c.cx(1, 0)
    # Test Z measurements correlation
    samples = c.sample(batch=1000)
    assert samples.shape == (1000, 2)
    counts = np.sum(samples, axis=0)
    # Should be roughly equal number of 00 and 11 states
    assert abs(counts[0] - counts[1]) < 50


def test_ghz_state():
    c = tc.StabilizerCircuit(3)
    c.h(0)
    c.cnot(0, 1)
    c.cnot(1, 2)
    # Test expectation values
    exp_z = c.expectation_ps(z=[0, 1])
    np.testing.assert_allclose(exp_z, 1.0, atol=1e-6)


def test_stabilizer_operations():
    c = tc.StabilizerCircuit(2)
    # Test all supported gates
    for gate in ["h", "x", "y", "z", "s", "sdg"]:
        getattr(c, gate)(0)
    c.cnot(0, 1)
    c.cz(0, 1)
    c.swap(0, 1)
    print(c.current_circuit())


def test_sample_expectation():
    c = tc.StabilizerCircuit(2)
    c.h(0)
    c.cnot(0, 1)
    # Test sampling-based expectation
    exp = c.sample_expectation_ps(z=[0, 1], shots=1000)
    assert abs(exp - 1.0) < 0.1


def test_invalid_gates():
    c = tc.StabilizerCircuit(1)
    with pytest.raises(ValueError):
        c.t(0)


def test_qir():
    c = tc.StabilizerCircuit(2)
    c.h(0)
    c.cnot(0, 1)
    qir = c.to_qir()
    assert len(qir) == 2
    assert qir[0]["name"] == "h"
    assert qir[1]["name"] == "cnot"
    print(qir)
    c1 = tc.Circuit.from_qir(qir)
    np.testing.assert_allclose(c1.expectation_ps(z=(0, 1)), 1, atol=1e-5)


def test_cond_measure():
    c = tc.StabilizerCircuit(3)

    # Prepare Bell pair between qubits 1 and 2
    c.H(1)
    c.CNOT(1, 2)

    # Prepare state to teleport on qubit 0 (can only be Clifford state)
    c.X(0)

    # Teleportation circuit
    c.CNOT(0, 1)
    c.H(0)

    # Measure qubits 0 and 1
    r0 = c.cond_measure(0)
    r1 = c.cond_measure(1)
    # Apply corrections based on measurements
    if r0 == 1:
        c.Z(2)
    if r1 == 1:
        c.X(2)

    # Verify teleported state
    final = c.measure(2)
    np.testing.assert_allclose(final, 1)


def test_post_select():
    c = tc.StabilizerCircuit(2)
    c.h(0)
    c.s(0)
    c.cx(0, 1)
    c.post_select(1, keep=1)
    np.testing.assert_allclose(c.expectation_ps(z=[0]), -1, atol=1e-5)


def test_to_openqasm():
    c = tc.StabilizerCircuit(3)
    c.sd(0)
    c.cz(0, 1)
    c.s(2)
    c.measure(0, 1)

    # Test basic circuit conversion
    qasm = c.to_openqasm()
    print(qasm)

    c1 = tc.StabilizerCircuit.from_openqasm(qasm)
    print(c1.draw())


def test_ee():
    c = tc.Circuit(8)
    for i in range(3):
        c.h(i)
        c.cx(i, i + 4)
        c.sd(i + 2)
    ee0 = tc.quantum.entanglement_entropy(c.state(), list(range(4)))
    c1 = tc.StabilizerCircuit.from_openqasm(c.to_openqasm())
    ee1 = c1.entanglement_entropy(list(range(4)))
    np.testing.assert_allclose(ee0, ee1, atol=1e-6)


def test_random_gates():
    c = tc.StabilizerCircuit(4)
    c.random_gate(0, 1, recorded=True)
    c.random_gate(2, 3)
    c.random_gate(1, 2)
    print(c.entanglement_entropy(list(range(2))))
    print(len(c.current_circuit()))


def test_circuit_state():
    c = tc.StabilizerCircuit(2)
    c.h(1)
    c1 = tc.Circuit(2)
    c1.h(1)
    np.testing.assert_allclose(c.state(), c1.state(), atol=1e-5)


def test_circuit_inputs():
    c = tc.StabilizerCircuit(2, inputs=[stim.PauliString("XX"), stim.PauliString("ZZ")])
    c.cnot(0, 1)
    c.h(0)
    np.testing.assert_allclose(c.expectation_ps(z=[0]), 1, atol=1e-6)
    np.testing.assert_allclose(c.expectation_ps(z=[1]), 1, atol=1e-6)


def test_depolarize():
    r = []
    for _ in range(20):
        c = tc.StabilizerCircuit(2)
        c.h(0)
        c.depolarizing(0, 1, p=0.2)
        c.h(0)
        r.append(c.expectation_ps(z=[0]))
    assert 4 < np.sum(r) < 20


def test_tableau_inputs():
    c = tc.StabilizerCircuit(2)
    c.x(1)
    c.s(1)
    it = c.current_inverse_tableau()
    c1 = tc.StabilizerCircuit(2, tableau_inputs=it)
    c1.s(1)
    c1.x(1)
    np.testing.assert_allclose(c1.state()[0], 1, atol=1e-6)


def test_mipt():
    resource = [stim.Tableau.random(2) for _ in range(1000)]

    def ruc(n, nlayer, p):
        c = tc.StabilizerCircuit(n)
        status = np.random.choice(1000, size=[n, nlayer], replace=True)
        for j in range(nlayer):
            for i in range(0, n, 2):
                c.tableau_gate(i, (i + 1) % n, tableau=resource[status[i, j]])
            for i in range(1, n, 2):
                c.tableau_gate(i, (i + 1) % n, tableau=resource[status[i, j]])
            mask = np.random.random(n) < p
            ids = list(np.where(mask)[0])
            c.cond_measure_many(*ids)
        return c.entanglement_entropy(list(range(n // 2)))

    print(ruc(50, 10, 0.1))
tests/test_templates.py
DELETED
@@ -1,218 +0,0 @@
# pylint: disable=invalid-name

import os
import sys

import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture as lf

thisfile = os.path.abspath(__file__)
modulepath = os.path.dirname(os.path.dirname(thisfile))

sys.path.insert(0, modulepath)
import tensorcircuit as tc


def test_any_measurement():
    c = tc.Circuit(2)
    c.H(0)
    c.H(1)
    mea = np.array([1, 1])
    r = tc.templates.measurements.any_measurements(c, mea, onehot=True)
    np.testing.assert_allclose(r, 1.0, atol=1e-5)
    mea2 = np.array([3, 0])
    r2 = tc.templates.measurements.any_measurements(c, mea2, onehot=True)
    np.testing.assert_allclose(r2, 0.0, atol=1e-5)


@pytest.mark.parametrize("backend", [lf("jaxb"), lf("tfb")])
def test_parameterized_local_measurement(backend):
    c = tc.Circuit(3)
    c.X(0)
    c.cnot(0, 1)
    c.H(-1)
    basis = tc.backend.convert_to_tensor(np.array([3, 3, 1]))
    r = tc.templates.measurements.parameterized_local_measurements(
        c, structures=basis, onehot=True
    )
    np.testing.assert_allclose(r, np.array([-1, -1, 1]), atol=1e-5)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_sparse_expectation(backend):
    ham = tc.backend.coo_sparse_matrix(
        indices=[[0, 1], [1, 0]], values=tc.backend.ones([2]), shape=(2, 2)
    )

    def f(param):
        c = tc.Circuit(1)
        c.rx(0, theta=param[0])
        c.H(0)
        return tc.templates.measurements.sparse_expectation(c, ham)

    fvag = tc.backend.jit(tc.backend.value_and_grad(f))
    param = tc.backend.zeros([1])
    print(fvag(param))


def test_bell_block():
    c = tc.Circuit(4)
    c = tc.templates.blocks.Bell_pair_block(c)
    for _ in range(10):
        s = c.perfect_sampling()[0]
        assert s[0] != s[1]
        assert s[2] != s[3]


def test_qft_block() -> None:
    n_qubits = 4
    c = tc.Circuit(n_qubits)
    c = tc.templates.blocks.qft(c, *range(n_qubits))
    mat = c.quoperator().eval().reshape(2 ** (n_qubits), -1)
    N = 2**n_qubits
    ref = np.exp(
        1j * 2 * np.pi * np.arange(N).reshape(-1, 1) * np.arange(N).reshape(1, -1) / N
    ) / np.sqrt(N)
    np.testing.assert_allclose(mat, ref, atol=1e-7)

    c = tc.Circuit(n_qubits)
    c = tc.templates.blocks.qft(c, *range(n_qubits), inverse=True)
    mat = c.quoperator().eval().reshape(2 ** (n_qubits), -1)
    np.testing.assert_allclose(mat, ref.T.conj(), atol=1e-7)


def test_grid_coord():
    cd = tc.templates.graphs.Grid2DCoord(3, 2)
    assert cd.all_cols() == [(0, 3), (1, 4), (2, 5)]
    assert cd.all_rows() == [(0, 1), (1, 2), (3, 4), (4, 5)]


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_qaoa_template(backend):
    cd = tc.templates.graphs.Grid2DCoord(3, 2)
    g = cd.lattice_graph(pbc=False)
    for e1, e2 in g.edges:
        g[e1][e2]["weight"] = np.random.uniform()

    def forward(paramzz, paramx):
        c = tc.Circuit(6)
        for i in range(6):
            c.H(i)
        c = tc.templates.blocks.QAOA_block(c, g, paramzz, paramx)
        return tc.templates.measurements.spin_glass_measurements(c, g)

    fvag = tc.backend.jit(tc.backend.value_and_grad(forward, argnums=(0, 1)))
    paramzz = tc.backend.real(tc.backend.ones([1]))
    paramx = tc.backend.real(tc.backend.ones([1]))
    _, gr = fvag(paramzz, paramx)
    np.testing.assert_allclose(gr[1].shape, [1])
    paramzz = tc.backend.real(tc.backend.ones([7]))
    paramx = tc.backend.real(tc.backend.ones([1]))
    _, gr = fvag(paramzz, paramx)
    np.testing.assert_allclose(gr[0].shape, [7])
    paramzz = tc.backend.real(tc.backend.ones([1]))
    paramx = tc.backend.real(tc.backend.ones([6]))
    _, gr = fvag(paramzz, paramx)
    np.testing.assert_allclose(gr[0].shape, [1])
    np.testing.assert_allclose(gr[1].shape, [6])


def test_state_wrapper():
    Bell_pair_block_state = tc.templates.blocks.state_centric(
        tc.templates.blocks.Bell_pair_block
    )
    s = Bell_pair_block_state(np.array([1.0, 0, 0, 0]))
    np.testing.assert_allclose(
        s, np.array([0.0, 0.70710677 + 0.0j, -0.70710677 + 0.0j, 0]), atol=1e-5
    )


@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
def test_amplitude_encoding(backend):
    batched_amplitude_encoding = tc.backend.vmap(
        tc.templates.dataset.amplitude_encoding, vectorized_argnums=0
    )
    figs = np.stack([np.eye(2), np.ones([2, 2])])
    figs = tc.array_to_tensor(figs)
    states = batched_amplitude_encoding(figs, 3)
    # note that you cannot use nqubits=3 here for jax backend
    # see this issue: https://github.com/google/jax/issues/7465
    np.testing.assert_allclose(states[1], np.array([0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0]))
    states = batched_amplitude_encoding(
        figs, 2, tc.array_to_tensor(np.array([0, 3, 1, 2]), dtype="int32")
    )
    np.testing.assert_allclose(states[0], 1 / np.sqrt(2) * np.array([1, 1, 0, 0]))


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_mpo_measurement(backend):
    def f(theta):
        mpo = tc.quantum.QuOperator.from_local_tensor(
            tc.array_to_tensor(tc.gates._x_matrix), [2, 2, 2], [0]
        )
        c = tc.Circuit(3)
        c.ry(0, theta=theta)
        c.H(1)
        c.H(2)
        e = tc.templates.measurements.mpo_expectation(c, mpo)
        return e

    v, g = tc.backend.jit(tc.backend.value_and_grad(f))(tc.backend.ones([]))

    np.testing.assert_allclose(v, 0.84147, atol=1e-4)
    np.testing.assert_allclose(g, 0.54032, atol=1e-4)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_operator_measurement(backend):
    mpo = tc.quantum.QuOperator.from_local_tensor(
        tc.array_to_tensor(tc.gates._x_matrix), [2, 2], [0]
    )
    dense = tc.array_to_tensor(np.kron(tc.gates._x_matrix, np.eye(2)))
    sparse = tc.quantum.PauliString2COO([1, 0])

    for h in [dense, sparse, mpo]:

        def f(theta):
            c = tc.Circuit(2)
            c.ry(0, theta=theta)
            c.H(1)
            e = tc.templates.measurements.operator_expectation(c, h)
            return e

        v, g = tc.backend.jit(tc.backend.value_and_grad(f))(tc.backend.ones([]))

        np.testing.assert_allclose(v, 0.84147, atol=1e-4)
        np.testing.assert_allclose(g, 0.54032, atol=1e-4)


@pytest.fixture
def symmetric_matrix():
    matrix = np.array([[-5.0, -2.0], [-2.0, 6.0]])
    nsym_matrix = np.array(
        [[1.0, 2.0, 3.0], [2.0, 4.0, 5.0], [3.0, 5.0, 6.0], [8.0, 7.0, 6.0]]
    )
    return matrix, nsym_matrix


def test_QUBO_to_Ising(symmetric_matrix):
    matrix1, matrix2 = symmetric_matrix
    pauli_terms, weights, offset = tc.templates.conversions.QUBO_to_Ising(matrix1)
    n = matrix1.shape[0]
    expected_num_terms = n + n * (n - 1) // 2
    assert len(pauli_terms) == expected_num_terms
    assert len(weights) == expected_num_terms
    assert isinstance(pauli_terms, list)
    assert isinstance(weights, np.ndarray)
    assert isinstance(offset, float)
    assert pauli_terms == [
        [1, 0],
        [0, 1],
        [1, 1],
    ]
    assert all(weights == np.array([3.5, -2.0, -1.0]))
    assert offset == -0.5

    with pytest.raises(ValueError):
        tc.templates.conversions.QUBO_to_Ising(matrix2)
tests/test_torchnn.py
DELETED
@@ -1,99 +0,0 @@
import os
import sys
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture as lf

thisfile = os.path.abspath(__file__)
modulepath = os.path.dirname(os.path.dirname(thisfile))

sys.path.insert(0, modulepath)

import tensorcircuit as tc

try:
    import torch
except ImportError:
    pytest.skip("torch is not installed", allow_module_level=True)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb"), lf("torchb")])
def test_quantumnet(backend):
    n = 6
    nlayers = 2

    def qpred(x, weights):
        c = tc.Circuit(n)
        for i in range(n):
            c.rx(i, theta=x[i])
        for j in range(nlayers):
            for i in range(n - 1):
                c.cnot(i, i + 1)
            for i in range(n):
                c.rx(i, theta=weights[2 * j, i])
                c.ry(i, theta=weights[2 * j + 1, i])
        ypred = tc.backend.stack([c.expectation_ps(x=[i]) for i in range(n)])
        return tc.backend.real(ypred)

    if tc.backend.name == "pytorch":
        use_interface = False
    else:
        use_interface = True

    ql = tc.TorchLayer(
        qpred, weights_shape=[2 * nlayers, n], use_interface=use_interface
    )

    yp = ql(torch.ones([3, n]))
    print(yp)

    np.testing.assert_allclose(yp.shape, np.array([3, n]))


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_inputs_multiple(backend):
    n = 3
    p = 0.1
    K = tc.backend
    torchb = tc.get_backend("pytorch")

    def f(state, noise, weights):
        c = tc.Circuit(n, inputs=state)
        for i in range(n):
            c.rz(i, theta=weights[i])
        for i in range(n):
            c.depolarizing(i, px=p, py=p, pz=p, status=noise[i])
        return K.real(c.expectation_ps(x=[0]))

    layer = tc.TorchLayer(f, [n], use_vmap=True, vectorized_argnums=[0, 1])
    state = torchb.ones([2, 2**n]) / 2 ** (n / 2)
    noise = 0.2 * torchb.ones([2, n], dtype="float32")
    l = layer(state, noise)
    lsum = torchb.sum(l)
    print(l)
    lsum.backward()
    for p in layer.parameters():
        print(p.grad)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb"), lf("torchb")])
def test_torchnn_hardware(backend):
    n = 2

    def qf(inputs, param):
        inputs = tc.backend.convert_to_tensor(tc.get_backend("pytorch").numpy(inputs))
        param = tc.backend.convert_to_tensor(tc.get_backend("pytorch").numpy(param))

        c = tc.Circuit(n)
        c.rx(0, theta=inputs[0])
        c.rx(1, theta=inputs[1])
        c.h(1)
        c.rzz(0, 1, theta=param[0])
        r = tc.backend.stack([c.expectation_ps(z=[i]) for i in range(n)])

        r = tc.get_backend("pytorch").convert_to_tensor(tc.backend.numpy(r))
        return torch.real(r)

    ql = tc.torchnn.HardwareNet(qf, [1])
    qnet = torch.nn.Sequential(ql, torch.nn.Linear(2, 1))
    print(qnet(torch.ones([5, 2])))
tests/test_van.py
DELETED
@@ -1,102 +0,0 @@
import sys
import os
import itertools

thisfile = os.path.abspath(__file__)
modulepath = os.path.dirname(os.path.dirname(thisfile))

sys.path.insert(0, modulepath)

import numpy as np
import tensorflow as tf


from tensorcircuit.applications.van import MaskedLinear, ResidualBlock, MADE, PixelCNN


def test_masklinear():
    ml = MaskedLinear(5, 10, 3, mask=tf.zeros([10, 3, 5, 3]))
    ml.set_weights([tf.ones([10, 3, 5, 3]), tf.ones([10, 3])])
    tf.debugging.assert_near(ml(tf.ones([1, 5, 3])), tf.ones([1, 10, 3]))

    mask = np.zeros([10, 3, 5, 3])
    mask[3, 2, 1, 0] = 1.0
    ml = MaskedLinear(5, 10, 3, mask=tf.constant(mask, dtype=tf.float32))
    ml.set_weights([tf.ones([10, 3, 5, 3]), tf.zeros([10, 3])])
    assert tf.reduce_sum(ml(tf.ones([5, 3]))[3, :]) == 1.0

    w = tf.random.uniform(shape=[10, 3, 5, 3])
    b = tf.random.uniform(shape=[10, 3])
    w_m = tf.reshape(w, [30, 15])
    b_m = tf.reshape(b, [30, 1])
    inputs = tf.ones([5, 3])
    inputs_m = tf.reshape(inputs, [15, 1])
    r_m = w_m @ inputs_m + b_m
    r = tf.reshape(r_m, [10, 3])
    ml = MaskedLinear(5, 10, 3)
    ml.set_weights([w, b])
    tf.debugging.assert_near(ml(inputs), r)


def test_residual_block():
    dense1 = tf.keras.layers.Dense(10, use_bias=False)
    dense1.build([1, 1])
    dense1.set_weights([np.ones([1, 10])])
    dense2 = tf.keras.layers.Dense(1, use_bias=False)
    dense2.build([1, 10])
    dense2.set_weights([np.ones([10, 1])])
    m = ResidualBlock([dense1, dense2])
    assert m(tf.ones([1, 1])) == 11.0


def test_made():
    import itertools

    m = MADE(2, 2, 6, 3, 3, nonmerge=False)
    l = []
    for i in itertools.product(*[list(range(3)) for _ in range(2)]):
        l.append(list(i))
    basis = tf.constant(l, dtype=tf.int32)
    print(basis)
    ptot = tf.reduce_sum(tf.exp(m.log_prob(tf.one_hot(basis, depth=3))))
    np.testing.assert_allclose(ptot.numpy(), 1.0, atol=1e-5)

    s, logp = m.sample(10)
    print(logp)
    assert s.shape == (10, 2, 3)


def test_made_fit_peak():
    opt = tf.optimizers.Adam(learning_rate=0.01)
    m = MADE(5, 5, 4, 3, 2)
    for step in range(100):
        with tf.GradientTape() as t:
            loss = -tf.reduce_sum(
                m.log_prob(
                    tf.one_hot(
                        [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 1, 1, 0]], depth=3
                    )
                )
            )
        gr = t.gradient(loss, m.variables)
        if step % 20 == 0:
            print(
                tf.exp(
                    m.log_prob(
                        tf.one_hot(
                            [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 1, 1, 0]], depth=3
                        )
                    )
                ).numpy()
            )
        opt.apply_gradients(zip(gr, m.variables))


def test_pixelcnn():
    m = PixelCNN(3, 5, 8)
    l = []
    for i in itertools.product(*[list(range(3)) for _ in range(4)]):
        l.append(list(i))
    basis = tf.constant(tf.reshape(l, [-1, 2, 2]), dtype=tf.int32)
    ptot = tf.reduce_sum(tf.exp(m.log_prob(tf.one_hot(basis, depth=3))))
    np.testing.assert_allclose(ptot.numpy(), 1.0, atol=1e-5)