tensorcircuit-nightly 1.3.0.dev20250809__py3-none-any.whl → 1.3.0.dev20250811__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tensorcircuit-nightly might be problematic.

Files changed (37)
  1. tensorcircuit/__init__.py +1 -1
  2. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250811.dist-info}/METADATA +1 -1
  3. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250811.dist-info}/RECORD +6 -37
  4. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250811.dist-info}/top_level.txt +0 -1
  5. tests/__init__.py +0 -0
  6. tests/conftest.py +0 -67
  7. tests/test_backends.py +0 -1156
  8. tests/test_calibrating.py +0 -149
  9. tests/test_channels.py +0 -409
  10. tests/test_circuit.py +0 -1713
  11. tests/test_cloud.py +0 -219
  12. tests/test_compiler.py +0 -147
  13. tests/test_dmcircuit.py +0 -555
  14. tests/test_ensemble.py +0 -72
  15. tests/test_fgs.py +0 -318
  16. tests/test_gates.py +0 -156
  17. tests/test_hamiltonians.py +0 -159
  18. tests/test_interfaces.py +0 -557
  19. tests/test_keras.py +0 -160
  20. tests/test_lattice.py +0 -1750
  21. tests/test_miscs.py +0 -304
  22. tests/test_mpscircuit.py +0 -341
  23. tests/test_noisemodel.py +0 -156
  24. tests/test_qaoa.py +0 -86
  25. tests/test_qem.py +0 -152
  26. tests/test_quantum.py +0 -549
  27. tests/test_quantum_attr.py +0 -42
  28. tests/test_results.py +0 -379
  29. tests/test_shadows.py +0 -160
  30. tests/test_simplify.py +0 -46
  31. tests/test_stabilizer.py +0 -226
  32. tests/test_templates.py +0 -218
  33. tests/test_timeevol.py +0 -641
  34. tests/test_torchnn.py +0 -99
  35. tests/test_van.py +0 -102
  36. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250811.dist-info}/WHEEL +0 -0
  37. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250811.dist-info}/licenses/LICENSE +0 -0
tests/test_results.py DELETED
@@ -1,379 +0,0 @@
- import pytest
- import numpy as np
-
-
- import tensorcircuit as tc
- from tensorcircuit.results import counts
- from tensorcircuit.results.readout_mitigation import ReadoutMit
-
- d = {"000": 2, "101": 3, "100": 4}
-
-
- def test_marginal_count():
-     assert counts.marginal_count(d, [1, 2])["00"] == 6
-     assert counts.marginal_count(d, [1])["0"] == 9
-     assert counts.marginal_count(d, [2, 1, 0])["001"] == 4
-
-
- def test_merge_count():
-     c1 = {"00": 10, "01": 20, "11": 30}
-     c2 = {"00": 5, "10": 15, "11": 25}
-     c3 = {"01": 10, "10": 20}
-
-     # Test merging two count dicts
-     merged = counts.merge_count(c1, c2)
-     assert merged["00"] == 15
-     assert merged["01"] == 20
-     assert merged["10"] == 15
-     assert merged["11"] == 55
-
-     # Test merging three count dicts
-     merged = counts.merge_count(c1, c2, c3)
-     assert merged["00"] == 15
-     assert merged["01"] == 30
-     assert merged["10"] == 35
-     assert merged["11"] == 55
-
-     # Test merging single count dict
-     merged = counts.merge_count(c1)
-     assert merged == c1
-
-     # Test merging empty dicts
-     merged = counts.merge_count({}, {})
-     assert merged == {}
-
-     # Test merging empty with non-empty
-     merged = counts.merge_count({}, c1)
-     assert merged == c1
-
-
- def test_count2vec():
-     assert counts.vec2count(counts.count2vec(d, normalization=False), prune=True) == d
-
-
- def test_kl():
-     a = {"00": 512, "11": 512}
-     assert counts.kl_divergence(a, a) == 0
-
-
- def test_expectation():
-     assert counts.expectation(d, [0, 1]) == -5 / 9
-     assert counts.expectation(d, None, [[1, -1], [1, 0], [1, 1]]) == -5 / 9
-
-
- def test_plot_histogram():
-     d = {"00": 10, "01": 2, "11": 8}
-     d1 = {"00": 11, "11": 9}
-     print(counts.plot_histogram([d, d1]))
-
-
- def test_readout():
-     nqubit = 4
-     shots = 4096
-     c = tc.Circuit(nqubit)
-     c.H(0)
-     c.cnot(0, 1)
-     c.x(3)
-
-     idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
-     raw_count = run([c], shots)[0]
-
-     # test "inverse", "constrained_least_square", "M3"
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system([0, 1, 2, 3, 6], shots=10000, method="local")
-
-     mit_count1 = mit.apply_correction(
-         raw_count, [1, 3, 2], method="inverse"
-     )  # direct(Max2),iterative(Max3), inverse,square
-     mit_count2 = mit.apply_correction(
-         raw_count, [1, 3, 2], method="constrained_least_square"
-     )
-     idea_count2 = counts.marginal_count(idea_count, [1, 3, 2])
-
-     assert counts.kl_divergence(idea_count2, mit_count1) < 0.05
-     assert counts.kl_divergence(idea_count2, mit_count2) < 0.05
-
-     # test "global" and "equal"
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system([0, 1, 2, 3], shots=100000, method="global")
-     A_global = mit.get_matrix([1, 3, 2])
-     mit_countg = mit.apply_correction(
-         raw_count, [1, 3, 2], method="constrained_least_square"
-     )
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system([0, 1, 2, 3], shots=100000, method="local")
-     A_local = mit.get_matrix([1, 3, 2])
-     mit_countl = mit.apply_correction(
-         raw_count, [1, 3, 2], method="constrained_least_square"
-     )
-
-     np.testing.assert_allclose(A_global, A_local, atol=1e-2)
-     assert counts.kl_divergence(mit_countg, mit_countl) < 0.05
-
-
- def test_readout_masks():
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(
-         [1, 2, 4], shots=8192, method="local", masks=["01010", "10101", "11111"]
-     )
-     np.testing.assert_allclose(
-         mit.single_qubit_cals[1][0, 0], 0.02 * np.sin(2) + 0.978, atol=1e-2
-     )
-
-
- def test_readout_expv():
-     nqubit = 4
-     c = tc.Circuit(nqubit)
-     c.H(0)
-     c.cnot(0, 1)
-     c.x(3)
-
-     idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
-     raw_count = run([c], 100000)[0]
-
-     cal_qubits = [0, 1, 2, 3]
-     use_qubits = [0, 1]
-
-     # idea_value = c.expectation_ps(z=[0,1])
-     idea_count2 = counts.marginal_count(idea_count, use_qubits)
-     idea_value = counts.expectation(idea_count2, z=[0, 1])
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="local")
-     mit_count = mit.apply_correction(raw_count, use_qubits, method="inverse")
-     mit_value = counts.expectation(mit_count, z=[0, 1])
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="local")
-     mit_value1 = mit.expectation(raw_count, z=[0, 1], method="inverse")
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="global")
-     mit_value2 = mit.expectation(raw_count, z=[0, 1], method="square")
-
-     np.testing.assert_allclose(idea_value, mit_value, atol=1e-2)
-     np.testing.assert_allclose(idea_value, mit_value1, atol=1e-2)
-     np.testing.assert_allclose(idea_value, mit_value2, atol=1e-2)
-
-     # test large size
-     nqubit = 20
-     c = tc.Circuit(nqubit)
-     c.H(0)
-     for i in range(nqubit - 1):
-         c.cnot(i, i + 1)
-     c.rx(1, theta=0.9)
-
-     idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
-     raw_count = run([c], 100000)[0]
-
-     cal_qubits = list(range(nqubit))
-     use_qubits = list(range(nqubit))
-
-     # idea_value = c.expectation_ps(z=[0,1])
-     idea_count2 = counts.marginal_count(idea_count, use_qubits)
-     idea_value = counts.expectation(idea_count2, z=list(range(nqubit)))
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="local")
-     mit_value1 = mit.expectation(raw_count, z=list(range(nqubit)), method="inverse")
-
-     np.testing.assert_allclose(idea_value, mit_value1, atol=1e-1)
-
-
- def test_M3():
-     try:
-         import mthree  # pylint: disable=unused-import
-     except ImportError:
-         pytest.skip("****** No mthree, skipping test suit *******")
-
-     nqubit = 20
-     c = tc.Circuit(nqubit)
-     c.H(0)
-     for i in range(nqubit - 1):
-         c.cnot(i, i + 1)
-     c.rx(1, theta=0.9)
-
-     idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
-     raw_count = run([c], 100000)[0]
-
-     cal_qubits = list(range(nqubit))
-     use_qubits = list(range(nqubit))
-
-     idea_count2 = counts.marginal_count(idea_count, use_qubits)
-     idea_value = counts.expectation(idea_count2, z=list(range(nqubit)))
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="local")
-     mit_count = mit.apply_correction(raw_count, use_qubits, method="M3_auto")
-     mit_value = counts.expectation(mit_count, z=list(range(nqubit)))
-     np.testing.assert_allclose(idea_value, mit_value, atol=1e-1)
-
-     nqubit = 4
-     shots = 4096
-     c = tc.Circuit(nqubit)
-     c.H(0)
-     c.cnot(0, 1)
-     c.x(3)
-
-     idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
-     raw_count = run([c], shots)[0]
-
-     mit_count3 = mit.apply_correction(raw_count, [1, 3, 2], method="M3_direct")
-     mit_count4 = mit.apply_correction(raw_count, [1, 3, 2], method="M3_iterative")
-     idea_count2 = counts.marginal_count(idea_count, [1, 3, 2])
-     assert counts.kl_divergence(idea_count2, mit_count3) < 0.05
-     assert counts.kl_divergence(idea_count2, mit_count4) < 0.05
-
-
- def partial_sample(c, batch, readout_error=None):
-     measure_index = []
-     for inst in c._extra_qir:
-         if inst["name"] == "measure":
-             measure_index.append(inst["index"][0])
-     if len(measure_index) == 0:
-         measure_index = list(range(c._nqubits))
-
-     ct = c.sample(
-         allow_state=True,
-         batch=batch,
-         readout_error=readout_error,
-         format="count_dict_bin",
-     )
-     return tc.results.counts.marginal_count(ct, measure_index)
-
-
- def run(cs, shots):
-     # customized backend for mitigation test
-     ts = []
-     for c in cs:
-         count = simulator(c, shots)
-         ts.append(count)
-     return ts
-
-
- def simulator(c, shots, logical_physical_mapping=None):
-     # with readout_error noise
-     nqubit = c._nqubits
-     if logical_physical_mapping is None:
-         logical_physical_mapping = {i: i for i in range(nqubit)}
-
-     gg = []
-     for i in range(200):
-         gg.append(np.sin(i) * 0.02 + 0.978)
-         # gg.append(0.98 - i * 0.01)
-     readout_error = np.reshape(gg[0 : nqubit * 2], (nqubit, 2))
-     mapped_readout_error = [[1, 1]] * nqubit
-     for lq, phyq in logical_physical_mapping.items():
-         mapped_readout_error[lq] = readout_error[phyq]
-     return partial_sample(c, shots, mapped_readout_error)
-
-
- def test_mapping():
-     nqubit = 15
-     shots = 100000
-     c = tc.Circuit(nqubit)
-     c.H(4)
-     c.cnot(4, 5)
-     c.cnot(5, 6)
-     c.cnot(6, 7)
-     c.rx(4, theta=0.8)
-     c.rx(7, theta=1.8)
-     c.measure_instruction(4)
-     c.measure_instruction(5)
-     c.measure_instruction(6)
-     c.measure_instruction(7)
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(list(range(15)), shots=100000, method="local")
-
-     show_qubits = [6, 7, 5]
-
-     idea_count = c.sample(batch=shots, allow_state=True, format="count_dict_bin")
-     idea_count1 = counts.marginal_count(idea_count, show_qubits)
-
-     def miti_kl_mean(logical_physical_mapping):
-         ls = []
-         for _ in range(10):
-             raw_count = simulator(c, shots, logical_physical_mapping)
-             mit_count1 = mit.apply_correction(
-                 raw_count,
-                 qubits=show_qubits,
-                 positional_logical_mapping={1: 5, 0: 4, 2: 6, 3: 7},
-                 logical_physical_mapping=logical_physical_mapping,
-                 method="square",
-             )
-             ls.append(counts.kl_divergence(idea_count1, mit_count1))
-         # print("std", np.std(listtt), np.mean(listtt)) # smaller error rate and larger shots, better mititation.
-         np.testing.assert_allclose(np.mean(ls), 0.01, atol=1e-2)
-
-     logical_physical_mapping = {4: 0, 6: 2, 7: 3, 5: 1}
-     miti_kl_mean(logical_physical_mapping)
-
-     logical_physical_mapping = {4: 4, 5: 5, 6: 6, 7: 7}
-     miti_kl_mean(logical_physical_mapping)
-
-     logical_physical_mapping = {4: 8, 5: 9, 6: 10, 7: 11}
-     miti_kl_mean(logical_physical_mapping)
-
-
- def test_readout_expv_map():
-     shots = 100000
-     nqubit = 7
-     c = tc.Circuit(nqubit)
-     c.H(3)
-     c.cnot(3, 4)
-     c.cnot(4, 5)
-     c.rx(3, theta=0.8)
-     c.rx(4, theta=1.2)
-     c.measure_instruction(3)
-     c.measure_instruction(4)
-     c.measure_instruction(5)
-
-     idea_count = c.sample(batch=100000, allow_state=True, format="count_dict_bin")
-     idea_value = counts.expectation(idea_count, z=[4, 5])
-
-     # logical_physical_mapping = {3: 3, 4: 4, 5: 5}
-     logical_physical_mapping = {3: 1, 5: 3, 4: 6}
-     positional_logical_mapping = {1: 4, 0: 3, 2: 5}
-
-     raw_count = simulator(c, shots, logical_physical_mapping)
-
-     cal_qubits = list(range(nqubit))
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="local")
-     mit_value1 = mit.expectation(
-         raw_count,
-         z=[4, 5],
-         positional_logical_mapping=positional_logical_mapping,
-         logical_physical_mapping=logical_physical_mapping,
-         method="inverse",
-     )
-
-     mit_count = mit.apply_correction(
-         raw_count,
-         qubits=[3, 4, 5],
-         positional_logical_mapping=positional_logical_mapping,
-         logical_physical_mapping=logical_physical_mapping,
-         method="inverse",
-     )
-     mit_value = counts.expectation(mit_count, z=[1, 2])
-     # print("idea", idea_value)
-     # print("mit", mit_value)
-
-     mit = ReadoutMit(execute=run)
-     mit.cals_from_system(cal_qubits, shots=100000, method="global")
-     mit_value2 = mit.expectation(
-         raw_count,
-         z=[4, 5],
-         positional_logical_mapping=positional_logical_mapping,
-         logical_physical_mapping=logical_physical_mapping,
-         method="inverse",
-     )
-
-     # print("mit1", mit_value1)
-     # print("mit2", mit_value2)
-     np.testing.assert_allclose(idea_value, mit_value, atol=3 * 1e-2)
-     np.testing.assert_allclose(idea_value, mit_value1, atol=3 * 1e-2)
-     np.testing.assert_allclose(idea_value, mit_value2, atol=3 * 1e-2)
tests/test_shadows.py DELETED
@@ -1,160 +0,0 @@
- import pytest
- from pytest_lazyfixture import lazy_fixture as lf
- import numpy as np
- import tensorcircuit as tc
- from tensorcircuit.shadows import (
-     shadow_bound,
-     shadow_snapshots,
-     global_shadow_state,
-     entropy_shadow,
-     renyi_entropy_2,
-     expectation_ps_shadow,
-     global_shadow_state1,
- )
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
- def test_jit(backend):
-     nq, repeat = 8, 5
-     ps = [1, 0, 0, 0, 2, 0, 0, 0]
-     sub = (1, 3, 6, 7)
-     error = 0.1
-     ns, k = shadow_bound(ps, error)
-     ns //= repeat
-
-     thetas = 2 * np.random.rand(2, nq) - 1
-
-     c = tc.Circuit(nq)
-     for i in range(nq):
-         c.H(i)
-     for i in range(2):
-         for j in range(nq):
-             c.cnot(j, (j + 1) % nq)
-         for j in range(nq):
-             c.rz(j, theta=thetas[i, j] * np.pi)
-
-     psi = c.state()
-     pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
-     status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))
-
-     def classical_shadow(psi, pauli_strings, status):
-         lss_states = shadow_snapshots(psi, pauli_strings, status)
-         expc = expectation_ps_shadow(lss_states, ps=ps, k=k)
-         ent = entropy_shadow(lss_states, sub=sub, alpha=2)
-         return expc, ent
-
-     csjit = tc.backend.jit(classical_shadow)
-
-     exact_expc = c.expectation_ps(ps=ps)
-     exact_rdm = tc.quantum.reduced_density_matrix(psi, cut=[0, 2, 4, 5])
-     exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
-     expc, ent = csjit(psi, pauli_strings, status)
-     expc = np.median(expc)
-
-     np.testing.assert_allclose(expc, exact_expc, atol=error)
-     np.testing.assert_allclose(ent, exact_ent, atol=5 * error)
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
- def test_state(backend):
-     nq, ns = 2, 10000
-
-     c = tc.Circuit(nq)
-     c.H(0)
-     c.cnot(0, 1)
-
-     psi = c.state()
-     bell_state = psi[:, None] @ psi[None, :]
-
-     pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
-     status = tc.backend.convert_to_tensor(np.random.rand(ns, 5))
-     lss_states = shadow_snapshots(c.state(), pauli_strings, status)
-     sdw_state = global_shadow_state(lss_states)
-     sdw_state1 = global_shadow_state1(lss_states)
-
-     np.testing.assert_allclose(sdw_state, bell_state, atol=0.1)
-     np.testing.assert_allclose(sdw_state1, bell_state, atol=0.1)
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
- def test_ent(backend):
-     nq, ns, repeat = 6, 1000, 500
-
-     thetas = 2 * np.random.rand(2, nq) - 1
-
-     c = tc.Circuit(nq)
-     for i in range(nq):
-         c.H(i)
-     for i in range(2):
-         for j in range(nq):
-             c.cnot(j, (j + 1) % nq)
-         for j in range(nq):
-             c.rz(j, theta=thetas[i, j] * np.pi)
-
-     sub = [1, 4]
-     psi = c.state()
-
-     pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
-     status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))
-     snapshots = shadow_snapshots(psi, pauli_strings, status, measurement_only=True)
-
-     exact_rdm = tc.quantum.reduced_density_matrix(
-         psi, cut=[i for i in range(nq) if i not in sub]
-     )
-     exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
-     ent = entropy_shadow(snapshots, pauli_strings, sub, alpha=2)
-     ent2 = renyi_entropy_2(snapshots, sub)
-
-     np.testing.assert_allclose(ent, exact_ent, atol=0.1)
-     np.testing.assert_allclose(ent2, exact_ent, atol=0.1)
-
-
- # @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
- # def test_expc(backend):
- #     import pennylane as qml
- #
- #     nq, ns, repeat = 6, 2000, 1000
- #
- #     thetas = 2 * np.random.rand(2, nq) - 1
- #
- #     c = tc.Circuit(nq)
- #     for i in range(nq):
- #         c.H(i)
- #     for i in range(2):
- #         for j in range(nq):
- #             c.cnot(j, (j + 1) % nq)
- #         for j in range(nq):
- #             c.rz(j, theta=thetas[i, j] * np.pi)
- #
- #     ps = [1, 0, 0, 0, 0, 3]
- #     sub = [1, 4]
- #     psi = c.state()
- #
- #     pauli_strings = tc.backend.convert_to_tensor(np.random.randint(1, 4, size=(ns, nq)))
- #     status = tc.backend.convert_to_tensor(np.random.rand(ns, repeat))
- #     snapshots = shadow_snapshots(psi, pauli_strings, status, measurement_only=True)
- #
- #     exact_expc = c.expectation_ps(ps=ps)
- #     exact_rdm = tc.quantum.reduced_density_matrix(
- #         psi, cut=[i for i in range(nq) if i not in sub]
- #     )
- #     exact_ent = tc.quantum.renyi_entropy(exact_rdm, k=2)
- #     print(exact_expc, exact_ent)
- #
- #     expc = np.median(expection_ps_shadow(snapshots, pauli_strings, ps=ps, k=9))
- #     ent = entropy_shadow(snapshots, pauli_strings, sub, alpha=2)
- #     ent2 = renyi_entropy_2(snapshots, sub)
- #     print(expc, ent, ent2)
- #
- #     pl_snapshots = np.asarray(snapshots).reshape(ns * repeat, nq)
- #     pl_ps = np.tile(np.asarray(pauli_strings - 1)[:, None, :], (1, repeat, 1)).reshape(
- #         ns * repeat, nq
- #     )
- #     shadow = qml.ClassicalShadow(pl_snapshots, pl_ps)
- #     H = qml.PauliX(0) @ qml.PauliZ(5)
- #     pl_expc = shadow.expval(H, k=9)
- #     pl_ent = shadow.entropy(sub, alpha=2)
- #     print(pl_expc, pl_ent)
- #
- #     assert np.isclose(expc, pl_expc)
- #     assert np.isclose(ent, pl_ent)
tests/test_simplify.py DELETED
@@ -1,46 +0,0 @@
- import os
- import sys
-
- thisfile = os.path.abspath(__file__)
- modulepath = os.path.dirname(os.path.dirname(thisfile))
-
- sys.path.insert(0, modulepath)
- import numpy as np
- import tensornetwork as tn
- from tensorcircuit import simplify
-
-
- def test_infer_shape():
-     a = tn.Node(np.ones([2, 3, 5]))
-     b = tn.Node(np.ones([3, 5, 7]))
-     a[1] ^ b[0]
-     a[2] ^ b[1]
-     assert simplify.infer_new_shape(a, b) == ((2, 7), (2, 3, 5), (3, 5, 7))
-
-
- def test_rank_simplify():
-     a = tn.Node(np.ones([2, 2]), name="a")
-     b = tn.Node(np.ones([2, 2]), name="b")
-     c = tn.Node(np.ones([2, 2, 2, 2]), name="c")
-     d = tn.Node(np.ones([2, 2, 2, 2, 2, 2]), name="d")
-     e = tn.Node(np.ones([2, 2]), name="e")
-
-     a[1] ^ c[0]
-     b[1] ^ c[1]
-     c[2] ^ d[0]
-     c[3] ^ d[1]
-     d[4] ^ e[0]
-
-     nodes = simplify._full_rank_simplify([a, b, c, d, e])
-     assert nodes[0].shape == tuple([2 for _ in range(6)])
-     assert len(nodes) == 1
-
-     f = tn.Node(np.ones([2, 2]), name="f")
-     g = tn.Node(np.ones([2, 2, 2, 2]), name="g")
-     h = tn.Node(np.ones([2, 2, 2, 2]), name="h")
-
-     f[1] ^ g[0]
-     g[2] ^ h[1]
-
-     nodes = simplify._full_rank_simplify([f, g, h])
-     assert len(nodes) == 2