tensorcircuit-nightly 1.0.2.dev20250108 (py3-none-any.whl) → 1.4.0.dev20251103 (py3-none-any.whl)
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that public registry.
Potentially problematic release: this version of tensorcircuit-nightly might be problematic.
- tensorcircuit/__init__.py +18 -2
- tensorcircuit/about.py +46 -0
- tensorcircuit/abstractcircuit.py +4 -0
- tensorcircuit/analogcircuit.py +413 -0
- tensorcircuit/applications/layers.py +1 -1
- tensorcircuit/applications/van.py +1 -1
- tensorcircuit/backends/abstract_backend.py +320 -7
- tensorcircuit/backends/cupy_backend.py +3 -1
- tensorcircuit/backends/jax_backend.py +102 -4
- tensorcircuit/backends/jax_ops.py +110 -1
- tensorcircuit/backends/numpy_backend.py +49 -3
- tensorcircuit/backends/pytorch_backend.py +92 -3
- tensorcircuit/backends/tensorflow_backend.py +102 -3
- tensorcircuit/basecircuit.py +157 -98
- tensorcircuit/circuit.py +115 -57
- tensorcircuit/cloud/local.py +1 -1
- tensorcircuit/cloud/quafu_provider.py +1 -1
- tensorcircuit/cloud/tencent.py +1 -1
- tensorcircuit/compiler/simple_compiler.py +2 -2
- tensorcircuit/cons.py +142 -21
- tensorcircuit/densitymatrix.py +43 -14
- tensorcircuit/experimental.py +387 -129
- tensorcircuit/fgs.py +282 -81
- tensorcircuit/gates.py +66 -22
- tensorcircuit/interfaces/__init__.py +1 -3
- tensorcircuit/interfaces/jax.py +189 -0
- tensorcircuit/keras.py +3 -3
- tensorcircuit/mpscircuit.py +154 -65
- tensorcircuit/quantum.py +868 -152
- tensorcircuit/quditcircuit.py +733 -0
- tensorcircuit/quditgates.py +618 -0
- tensorcircuit/results/counts.py +147 -20
- tensorcircuit/results/readout_mitigation.py +4 -1
- tensorcircuit/shadows.py +1 -1
- tensorcircuit/simplify.py +3 -1
- tensorcircuit/stabilizercircuit.py +479 -0
- tensorcircuit/templates/__init__.py +2 -0
- tensorcircuit/templates/blocks.py +2 -2
- tensorcircuit/templates/hamiltonians.py +174 -0
- tensorcircuit/templates/lattice.py +1789 -0
- tensorcircuit/timeevol.py +896 -0
- tensorcircuit/translation.py +10 -3
- tensorcircuit/utils.py +7 -0
- {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/METADATA +73 -23
- tensorcircuit_nightly-1.4.0.dev20251103.dist-info/RECORD +96 -0
- {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/WHEEL +1 -1
- {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/top_level.txt +0 -1
- tensorcircuit_nightly-1.0.2.dev20250108.dist-info/RECORD +0 -115
- tests/__init__.py +0 -0
- tests/conftest.py +0 -67
- tests/test_backends.py +0 -1031
- tests/test_calibrating.py +0 -149
- tests/test_channels.py +0 -365
- tests/test_circuit.py +0 -1699
- tests/test_cloud.py +0 -219
- tests/test_compiler.py +0 -147
- tests/test_dmcircuit.py +0 -555
- tests/test_ensemble.py +0 -72
- tests/test_fgs.py +0 -310
- tests/test_gates.py +0 -156
- tests/test_interfaces.py +0 -429
- tests/test_keras.py +0 -160
- tests/test_miscs.py +0 -277
- tests/test_mpscircuit.py +0 -341
- tests/test_noisemodel.py +0 -156
- tests/test_qaoa.py +0 -86
- tests/test_qem.py +0 -152
- tests/test_quantum.py +0 -526
- tests/test_quantum_attr.py +0 -42
- tests/test_results.py +0 -347
- tests/test_shadows.py +0 -160
- tests/test_simplify.py +0 -46
- tests/test_templates.py +0 -218
- tests/test_torchnn.py +0 -99
- tests/test_van.py +0 -102
- {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info/licenses}/LICENSE +0 -0
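The file list above shows that the tests/ package is no longer bundled in the new wheel: every tests/*.py entry is removed, the tests line is dropped from top_level.txt, and RECORD shrinks accordingly. As a purely illustrative check (not part of this diff), one could confirm this against an installed copy, assuming the distribution is installed under the name tensorcircuit-nightly:

# Illustrative check (not from the diff): list the files recorded for the installed
# distribution and verify that no bundled "tests/" entries remain.
from importlib.metadata import files

recorded = files("tensorcircuit-nightly") or []
bundled_tests = [str(p) for p in recorded if str(p).startswith("tests/")]
print(bundled_tests)  # expected: [] for 1.4.0.dev20251103, non-empty for 1.0.2.dev20250108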
tests/test_results.py
DELETED
@@ -1,347 +0,0 @@
import pytest
import numpy as np


import tensorcircuit as tc
from tensorcircuit.results import counts
from tensorcircuit.results.readout_mitigation import ReadoutMit

d = {"000": 2, "101": 3, "100": 4}


def test_marginal_count():
    assert counts.marginal_count(d, [1, 2])["00"] == 6
    assert counts.marginal_count(d, [1])["0"] == 9
    assert counts.marginal_count(d, [2, 1, 0])["001"] == 4


def test_count2vec():
    assert counts.vec2count(counts.count2vec(d, normalization=False), prune=True) == d


def test_kl():
    a = {"00": 512, "11": 512}
    assert counts.kl_divergence(a, a) == 0


def test_expectation():
    assert counts.expectation(d, [0, 1]) == -5 / 9
    assert counts.expectation(d, None, [[1, -1], [1, 0], [1, 1]]) == -5 / 9


def test_plot_histogram():
    d = {"00": 10, "01": 2, "11": 8}
    d1 = {"00": 11, "11": 9}
    print(counts.plot_histogram([d, d1]))


def test_readout():
    nqubit = 4
    shots = 4096
    c = tc.Circuit(nqubit)
    c.H(0)
    c.cnot(0, 1)
    c.x(3)

    idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
    raw_count = run([c], shots)[0]

    # test "inverse", "constrained_least_square", "M3"
    mit = ReadoutMit(execute=run)
    mit.cals_from_system([0, 1, 2, 3, 6], shots=10000, method="local")

    mit_count1 = mit.apply_correction(
        raw_count, [1, 3, 2], method="inverse"
    )  # direct(Max2),iterative(Max3), inverse,square
    mit_count2 = mit.apply_correction(
        raw_count, [1, 3, 2], method="constrained_least_square"
    )
    idea_count2 = counts.marginal_count(idea_count, [1, 3, 2])

    assert counts.kl_divergence(idea_count2, mit_count1) < 0.05
    assert counts.kl_divergence(idea_count2, mit_count2) < 0.05

    # test "global" and "equal"
    mit = ReadoutMit(execute=run)
    mit.cals_from_system([0, 1, 2, 3], shots=100000, method="global")
    A_global = mit.get_matrix([1, 3, 2])
    mit_countg = mit.apply_correction(
        raw_count, [1, 3, 2], method="constrained_least_square"
    )

    mit = ReadoutMit(execute=run)
    mit.cals_from_system([0, 1, 2, 3], shots=100000, method="local")
    A_local = mit.get_matrix([1, 3, 2])
    mit_countl = mit.apply_correction(
        raw_count, [1, 3, 2], method="constrained_least_square"
    )

    np.testing.assert_allclose(A_global, A_local, atol=1e-2)
    assert counts.kl_divergence(mit_countg, mit_countl) < 0.05


def test_readout_masks():
    mit = ReadoutMit(execute=run)
    mit.cals_from_system(
        [1, 2, 4], shots=8192, method="local", masks=["01010", "10101", "11111"]
    )
    np.testing.assert_allclose(
        mit.single_qubit_cals[1][0, 0], 0.02 * np.sin(2) + 0.978, atol=1e-2
    )


def test_readout_expv():
    nqubit = 4
    c = tc.Circuit(nqubit)
    c.H(0)
    c.cnot(0, 1)
    c.x(3)

    idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
    raw_count = run([c], 100000)[0]

    cal_qubits = [0, 1, 2, 3]
    use_qubits = [0, 1]

    # idea_value = c.expectation_ps(z=[0,1])
    idea_count2 = counts.marginal_count(idea_count, use_qubits)
    idea_value = counts.expectation(idea_count2, z=[0, 1])

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="local")
    mit_count = mit.apply_correction(raw_count, use_qubits, method="inverse")
    mit_value = counts.expectation(mit_count, z=[0, 1])

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="local")
    mit_value1 = mit.expectation(raw_count, z=[0, 1], method="inverse")

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="global")
    mit_value2 = mit.expectation(raw_count, z=[0, 1], method="square")

    np.testing.assert_allclose(idea_value, mit_value, atol=1e-2)
    np.testing.assert_allclose(idea_value, mit_value1, atol=1e-2)
    np.testing.assert_allclose(idea_value, mit_value2, atol=1e-2)

    # test large size
    nqubit = 20
    c = tc.Circuit(nqubit)
    c.H(0)
    for i in range(nqubit - 1):
        c.cnot(i, i + 1)
    c.rx(1, theta=0.9)

    idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
    raw_count = run([c], 100000)[0]

    cal_qubits = list(range(nqubit))
    use_qubits = list(range(nqubit))

    # idea_value = c.expectation_ps(z=[0,1])
    idea_count2 = counts.marginal_count(idea_count, use_qubits)
    idea_value = counts.expectation(idea_count2, z=list(range(nqubit)))

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="local")
    mit_value1 = mit.expectation(raw_count, z=list(range(nqubit)), method="inverse")

    np.testing.assert_allclose(idea_value, mit_value1, atol=1e-1)


def test_M3():
    try:
        import mthree  # pylint: disable=unused-import
    except ImportError:
        pytest.skip("****** No mthree, skipping test suit *******")

    nqubit = 20
    c = tc.Circuit(nqubit)
    c.H(0)
    for i in range(nqubit - 1):
        c.cnot(i, i + 1)
    c.rx(1, theta=0.9)

    idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
    raw_count = run([c], 100000)[0]

    cal_qubits = list(range(nqubit))
    use_qubits = list(range(nqubit))

    idea_count2 = counts.marginal_count(idea_count, use_qubits)
    idea_value = counts.expectation(idea_count2, z=list(range(nqubit)))

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="local")
    mit_count = mit.apply_correction(raw_count, use_qubits, method="M3_auto")
    mit_value = counts.expectation(mit_count, z=list(range(nqubit)))
    np.testing.assert_allclose(idea_value, mit_value, atol=1e-1)

    nqubit = 4
    shots = 4096
    c = tc.Circuit(nqubit)
    c.H(0)
    c.cnot(0, 1)
    c.x(3)

    idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
    raw_count = run([c], shots)[0]

    mit_count3 = mit.apply_correction(raw_count, [1, 3, 2], method="M3_direct")
    mit_count4 = mit.apply_correction(raw_count, [1, 3, 2], method="M3_iterative")
    idea_count2 = counts.marginal_count(idea_count, [1, 3, 2])
    assert counts.kl_divergence(idea_count2, mit_count3) < 0.05
    assert counts.kl_divergence(idea_count2, mit_count4) < 0.05


def partial_sample(c, batch, readout_error=None):
    measure_index = []
    for inst in c._extra_qir:
        if inst["name"] == "measure":
            measure_index.append(inst["index"][0])
    if len(measure_index) == 0:
        measure_index = list(range(c._nqubits))

    ct = c.sample(
        allow_state=True,
        batch=batch,
        readout_error=readout_error,
        format="count_dict_bin",
    )
    return tc.results.counts.marginal_count(ct, measure_index)


def run(cs, shots):
    # customized backend for mitigation test
    ts = []
    for c in cs:
        count = simulator(c, shots)
        ts.append(count)
    return ts


def simulator(c, shots, logical_physical_mapping=None):
    # with readout_error noise
    nqubit = c._nqubits
    if logical_physical_mapping is None:
        logical_physical_mapping = {i: i for i in range(nqubit)}

    gg = []
    for i in range(200):
        gg.append(np.sin(i) * 0.02 + 0.978)
        # gg.append(0.98 - i * 0.01)
    readout_error = np.reshape(gg[0 : nqubit * 2], (nqubit, 2))
    mapped_readout_error = [[1, 1]] * nqubit
    for lq, phyq in logical_physical_mapping.items():
        mapped_readout_error[lq] = readout_error[phyq]
    return partial_sample(c, shots, mapped_readout_error)


def test_mapping():
    nqubit = 15
    shots = 100000
    c = tc.Circuit(nqubit)
    c.H(4)
    c.cnot(4, 5)
    c.cnot(5, 6)
    c.cnot(6, 7)
    c.rx(4, theta=0.8)
    c.rx(7, theta=1.8)
    c.measure_instruction(4)
    c.measure_instruction(5)
    c.measure_instruction(6)
    c.measure_instruction(7)

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(list(range(15)), shots=100000, method="local")

    show_qubits = [6, 7, 5]

    idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
    idea_count1 = counts.marginal_count(idea_count, show_qubits)

    def miti_kl_mean(logical_physical_mapping):
        ls = []
        for _ in range(10):
            raw_count = simulator(c, shots, logical_physical_mapping)
            mit_count1 = mit.apply_correction(
                raw_count,
                qubits=show_qubits,
                positional_logical_mapping={1: 5, 0: 4, 2: 6, 3: 7},
                logical_physical_mapping=logical_physical_mapping,
                method="square",
            )
            ls.append(counts.kl_divergence(idea_count1, mit_count1))
        # print("std", np.std(listtt), np.mean(listtt)) # smaller error rate and larger shots, better mititation.
        np.testing.assert_allclose(np.mean(ls), 0.01, atol=1e-2)

    logical_physical_mapping = {4: 0, 6: 2, 7: 3, 5: 1}
    miti_kl_mean(logical_physical_mapping)

    logical_physical_mapping = {4: 4, 5: 5, 6: 6, 7: 7}
    miti_kl_mean(logical_physical_mapping)

    logical_physical_mapping = {4: 8, 5: 9, 6: 10, 7: 11}
    miti_kl_mean(logical_physical_mapping)


def test_readout_expv_map():
    shots = 100000
    nqubit = 7
    c = tc.Circuit(nqubit)
    c.H(3)
    c.cnot(3, 4)
    c.cnot(4, 5)
    c.rx(3, theta=0.8)
    c.rx(4, theta=1.2)
    c.measure_instruction(3)
    c.measure_instruction(4)
    c.measure_instruction(5)

    idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
    idea_value = counts.expectation(idea_count, z=[4, 5])

    # logical_physical_mapping = {3: 3, 4: 4, 5: 5}
    logical_physical_mapping = {3: 1, 5: 3, 4: 6}
    positional_logical_mapping = {1: 4, 0: 3, 2: 5}

    raw_count = simulator(c, shots, logical_physical_mapping)

    cal_qubits = list(range(nqubit))

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="local")
    mit_value1 = mit.expectation(
        raw_count,
        z=[4, 5],
        positional_logical_mapping=positional_logical_mapping,
        logical_physical_mapping=logical_physical_mapping,
        method="inverse",
    )

    mit_count = mit.apply_correction(
        raw_count,
        qubits=[3, 4, 5],
        positional_logical_mapping=positional_logical_mapping,
        logical_physical_mapping=logical_physical_mapping,
        method="inverse",
    )
    mit_value = counts.expectation(mit_count, z=[1, 2])
    # print("idea", idea_value)
    # print("mit", mit_value)

    mit = ReadoutMit(execute=run)
    mit.cals_from_system(cal_qubits, shots=100000, method="global")
    mit_value2 = mit.expectation(
        raw_count,
        z=[4, 5],
        positional_logical_mapping=positional_logical_mapping,
        logical_physical_mapping=logical_physical_mapping,
        method="inverse",
    )

    # print("mit1", mit_value1)
    # print("mit2", mit_value2)
    np.testing.assert_allclose(idea_value, mit_value, atol=3 * 1e-2)
    np.testing.assert_allclose(idea_value, mit_value1, atol=3 * 1e-2)
    np.testing.assert_allclose(idea_value, mit_value2, atol=3 * 1e-2)
tests/test_shadows.py
DELETED
@@ -1,160 +0,0 @@
import pytest
from pytest_lazyfixture import lazy_fixture as lf
import numpy as np
import tensorcircuit as tc
from tensorcircuit.shadows import (
    shadow_bound,
    shadow_snapshots,
    global_shadow_state,
    entropy_shadow,
    renyi_entropy_2,
    expectation_ps_shadow,
    global_shadow_state1,
)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_jit(backend):
    nq, repeat = 8, 5
    ps = [1, 0, 0, 0, 2, 0, 0, 0]
    sub = (1, 3, 6, 7)
    error = 0.1
    ns, k = shadow_bound(ps, error)
    ns //= repeat

    thetas = 2 * np.random.rand(2, nq) - 1

    c = tc.Circuit(nq)
    for i in range(nq):
        c.H(i)
    for i in range(2):
        for j in range(nq):
            c.cnot(j, (j + 1) % nq)
        for j in range(nq):
            c.rz(j, theta=thetas[i, j] * np.pi)

    psi = c.state()
    pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
    status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))

    def classical_shadow(psi, pauli_strings, status):
        lss_states = shadow_snapshots(psi, pauli_strings, status)
        expc = expectation_ps_shadow(lss_states, ps=ps, k=k)
        ent = entropy_shadow(lss_states, sub=sub, alpha=2)
        return expc, ent

    csjit = tc.backend.jit(classical_shadow)

    exact_expc = c.expectation_ps(ps=ps)
    exact_rdm = tc.quantum.reduced_density_matrix(psi, cut=[0, 2, 4, 5])
    exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
    expc, ent = csjit(psi, pauli_strings, status)
    expc = np.median(expc)

    np.testing.assert_allclose(expc, exact_expc, atol=error)
    np.testing.assert_allclose(ent, exact_ent, atol=5 * error)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_state(backend):
    nq, ns = 2, 10000

    c = tc.Circuit(nq)
    c.H(0)
    c.cnot(0, 1)

    psi = c.state()
    bell_state = psi[:, None] @ psi[None, :]

    pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
    status = tc.backend.convert_to_tensor(np.random.rand(ns, 5))
    lss_states = shadow_snapshots(c.state(), pauli_strings, status)
    sdw_state = global_shadow_state(lss_states)
    sdw_state1 = global_shadow_state1(lss_states)

    np.testing.assert_allclose(sdw_state, bell_state, atol=0.1)
    np.testing.assert_allclose(sdw_state1, bell_state, atol=0.1)


@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_ent(backend):
    nq, ns, repeat = 6, 1000, 500

    thetas = 2 * np.random.rand(2, nq) - 1

    c = tc.Circuit(nq)
    for i in range(nq):
        c.H(i)
    for i in range(2):
        for j in range(nq):
            c.cnot(j, (j + 1) % nq)
        for j in range(nq):
            c.rz(j, theta=thetas[i, j] * np.pi)

    sub = [1, 4]
    psi = c.state()

    pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
    status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))
    snapshots = shadow_snapshots(psi, pauli_strings, status, measurement_only=True)

    exact_rdm = tc.quantum.reduced_density_matrix(
        psi, cut=[i for i in range(nq) if i not in sub]
    )
    exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
    ent = entropy_shadow(snapshots, pauli_strings, sub, alpha=2)
    ent2 = renyi_entropy_2(snapshots, sub)

    np.testing.assert_allclose(ent, exact_ent, atol=0.1)
    np.testing.assert_allclose(ent2, exact_ent, atol=0.1)


# @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
# def test_expc(backend):
#     import pennylane as qml
#
#     nq, ns, repeat = 6, 2000, 1000
#
#     thetas = 2 * np.random.rand(2, nq) - 1
#
#     c = tc.Circuit(nq)
#     for i in range(nq):
#         c.H(i)
#     for i in range(2):
#         for j in range(nq):
#             c.cnot(j, (j + 1) % nq)
#         for j in range(nq):
#             c.rz(j, theta=thetas[i, j] * np.pi)
#
#     ps = [1, 0, 0, 0, 0, 3]
#     sub = [1, 4]
#     psi = c.state()
#
#     pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
#     status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))
#     snapshots = shadow_snapshots(psi, pauli_strings, status, measurement_only=True)
#
#     exact_expc = c.expectation_ps(ps=ps)
#     exact_rdm = tc.quantum.reduced_density_matrix(
#         psi, cut=[i for i in range(nq) if i not in sub]
#     )
#     exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
#     print(exact_expc, exact_ent)
#
#     expc = np.median(expection_ps_shadow(snapshots, pauli_strings, ps=ps, k=9))
#     ent = entropy_shadow(snapshots, pauli_strings, sub, alpha=2)
#     ent2 = renyi_entropy_2(snapshots, sub)
#     print(expc, ent, ent2)
#
#     pl_snapshots = np.asarray(snapshots).reshape(ns * repeat, nq)
#     pl_ps = np.tile(np.asarray(pauli_strings - 1)[:, None, :], (1, repeat, 1)).reshape(
#         ns * repeat, nq
#     )
#     shadow = qml.ClassicalShadow(pl_snapshots, pl_ps)
#     H = qml.PauliX(0) @ qml.PauliZ(5)
#     pl_expc = shadow.expval(H, k=9)
#     pl_ent = shadow.entropy(sub, alpha=2)
#     print(pl_expc, pl_ent)
#
#     assert np.isclose(expc, pl_expc)
#     assert np.isclose(ent, pl_ent)
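As a hedged, self-contained sketch (again not taken from the deleted file), the classical-shadow pipeline these tests exercise reduces to: draw random Pauli measurement bases, collect snapshots, and take a median-of-means over the per-group estimates. The qubit count, target error, and repeat factor below are illustrative choices; the observable is Z0 Z1 on a 4-qubit GHZ state, whose exact value is 1:

import numpy as np
import tensorcircuit as tc
from tensorcircuit.shadows import expectation_ps_shadow, shadow_bound, shadow_snapshots

tc.set_backend("jax")  # the deleted tests run the shadows APIs under the tensorflow or jax backend

nq, repeat = 4, 5
ps = [3, 3, 0, 0]  # Pauli string Z Z I I (0 = I, 1 = X, 2 = Y, 3 = Z)
ns, k = shadow_bound(ps, 0.1)  # snapshot budget and number of median-of-means groups
ns //= repeat

c = tc.Circuit(nq)
c.H(0)
for i in range(nq - 1):
    c.cnot(i, i + 1)
psi = c.state()

# Random measurement bases (1 = X, 2 = Y, 3 = Z) and the random numbers driving sampling.
pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))

snapshots = shadow_snapshots(psi, pauli_strings, status)
estimate = np.median(expectation_ps_shadow(snapshots, ps=ps, k=k))
print(estimate, c.expectation_ps(ps=ps))  # should agree to within roughly 0.1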
tests/test_simplify.py
DELETED
@@ -1,46 +0,0 @@
import os
import sys

thisfile = os.path.abspath(__file__)
modulepath = os.path.dirname(os.path.dirname(thisfile))

sys.path.insert(0, modulepath)
import numpy as np
import tensornetwork as tn
from tensorcircuit import simplify


def test_infer_shape():
    a = tn.Node(np.ones([2, 3, 5]))
    b = tn.Node(np.ones([3, 5, 7]))
    a[1] ^ b[0]
    a[2] ^ b[1]
    assert simplify.infer_new_shape(a, b) == ((2, 7), (2, 3, 5), (3, 5, 7))


def test_rank_simplify():
    a = tn.Node(np.ones([2, 2]), name="a")
    b = tn.Node(np.ones([2, 2]), name="b")
    c = tn.Node(np.ones([2, 2, 2, 2]), name="c")
    d = tn.Node(np.ones([2, 2, 2, 2, 2, 2]), name="d")
    e = tn.Node(np.ones([2, 2]), name="e")

    a[1] ^ c[0]
    b[1] ^ c[1]
    c[2] ^ d[0]
    c[3] ^ d[1]
    d[4] ^ e[0]

    nodes = simplify._full_rank_simplify([a, b, c, d, e])
    assert nodes[0].shape == tuple([2 for _ in range(6)])
    assert len(nodes) == 1

    f = tn.Node(np.ones([2, 2]), name="f")
    g = tn.Node(np.ones([2, 2, 2, 2]), name="g")
    h = tn.Node(np.ones([2, 2, 2, 2]), name="h")

    f[1] ^ g[0]
    g[2] ^ h[1]

    nodes = simplify._full_rank_simplify([f, g, h])
    assert len(nodes) == 2
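The first assertion above can be cross-checked by actually contracting the two nodes. This short sketch (an illustrative addition, not part of the test file, and assuming infer_new_shape only inspects the network without modifying it) confirms that the shape it reports matches the tensor produced by tensornetwork's contract_between:

import numpy as np
import tensornetwork as tn
from tensorcircuit import simplify

a = tn.Node(np.ones([2, 3, 5]))
b = tn.Node(np.ones([3, 5, 7]))
a[1] ^ b[0]
a[2] ^ b[1]

predicted, _, _ = simplify.infer_new_shape(a, b)  # (2, 7), as asserted above
contracted = tn.contract_between(a, b, output_edge_order=[a[0], b[2]])
assert tuple(contracted.tensor.shape) == predicted == (2, 7)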
|