tensorcircuit-nightly 1.3.0.dev20250809__py3-none-any.whl → 1.3.0.dev20250810__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tensorcircuit-nightly might be problematic.

Files changed (37)
  1. tensorcircuit/__init__.py +1 -1
  2. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250810.dist-info}/METADATA +1 -1
  3. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250810.dist-info}/RECORD +6 -37
  4. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250810.dist-info}/top_level.txt +0 -1
  5. tests/__init__.py +0 -0
  6. tests/conftest.py +0 -67
  7. tests/test_backends.py +0 -1156
  8. tests/test_calibrating.py +0 -149
  9. tests/test_channels.py +0 -409
  10. tests/test_circuit.py +0 -1713
  11. tests/test_cloud.py +0 -219
  12. tests/test_compiler.py +0 -147
  13. tests/test_dmcircuit.py +0 -555
  14. tests/test_ensemble.py +0 -72
  15. tests/test_fgs.py +0 -318
  16. tests/test_gates.py +0 -156
  17. tests/test_hamiltonians.py +0 -159
  18. tests/test_interfaces.py +0 -557
  19. tests/test_keras.py +0 -160
  20. tests/test_lattice.py +0 -1750
  21. tests/test_miscs.py +0 -304
  22. tests/test_mpscircuit.py +0 -341
  23. tests/test_noisemodel.py +0 -156
  24. tests/test_qaoa.py +0 -86
  25. tests/test_qem.py +0 -152
  26. tests/test_quantum.py +0 -549
  27. tests/test_quantum_attr.py +0 -42
  28. tests/test_results.py +0 -379
  29. tests/test_shadows.py +0 -160
  30. tests/test_simplify.py +0 -46
  31. tests/test_stabilizer.py +0 -226
  32. tests/test_templates.py +0 -218
  33. tests/test_timeevol.py +0 -641
  34. tests/test_torchnn.py +0 -99
  35. tests/test_van.py +0 -102
  36. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250810.dist-info}/WHEEL +0 -0
  37. {tensorcircuit_nightly-1.3.0.dev20250809.dist-info → tensorcircuit_nightly-1.3.0.dev20250810.dist-info}/licenses/LICENSE +0 -0
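
Apart from the one-line changes in tensorcircuit/__init__.py and METADATA (presumably the nightly version bump), the net effect of this diff is that the top-level tests package is no longer shipped inside the wheel, as reflected in the RECORD and top_level.txt entries above. A minimal sketch of how one might confirm this after installing the new wheel, assuming a clean environment in which no other installed package (and no local directory on sys.path) provides a top-level tests module:

    import importlib.util

    # the library itself is still importable from the new wheel
    assert importlib.util.find_spec("tensorcircuit") is not None
    # the tests package dropped in this release should no longer resolve
    assert importlib.util.find_spec("tests") is None
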
tests/test_torchnn.py DELETED
@@ -1,99 +0,0 @@
- import os
- import sys
- import numpy as np
- import pytest
- from pytest_lazyfixture import lazy_fixture as lf
-
- thisfile = os.path.abspath(__file__)
- modulepath = os.path.dirname(os.path.dirname(thisfile))
-
- sys.path.insert(0, modulepath)
-
- import tensorcircuit as tc
-
- try:
-     import torch
- except ImportError:
-     pytest.skip("torch is not installed", allow_module_level=True)
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb"), lf("torchb")])
- def test_quantumnet(backend):
-     n = 6
-     nlayers = 2
-
-     def qpred(x, weights):
-         c = tc.Circuit(n)
-         for i in range(n):
-             c.rx(i, theta=x[i])
-         for j in range(nlayers):
-             for i in range(n - 1):
-                 c.cnot(i, i + 1)
-             for i in range(n):
-                 c.rx(i, theta=weights[2 * j, i])
-                 c.ry(i, theta=weights[2 * j + 1, i])
-         ypred = tc.backend.stack([c.expectation_ps(x=[i]) for i in range(n)])
-         return tc.backend.real(ypred)
-
-     if tc.backend.name == "pytorch":
-         use_interface = False
-     else:
-         use_interface = True
-
-     ql = tc.TorchLayer(
-         qpred, weights_shape=[2 * nlayers, n], use_interface=use_interface
-     )
-
-     yp = ql(torch.ones([3, n]))
-     print(yp)
-
-     np.testing.assert_allclose(yp.shape, np.array([3, n]))
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
- def test_inputs_multiple(backend):
-     n = 3
-     p = 0.1
-     K = tc.backend
-     torchb = tc.get_backend("pytorch")
-
-     def f(state, noise, weights):
-         c = tc.Circuit(n, inputs=state)
-         for i in range(n):
-             c.rz(i, theta=weights[i])
-         for i in range(n):
-             c.depolarizing(i, px=p, py=p, pz=p, status=noise[i])
-         return K.real(c.expectation_ps(x=[0]))
-
-     layer = tc.TorchLayer(f, [n], use_vmap=True, vectorized_argnums=[0, 1])
-     state = torchb.ones([2, 2**n]) / 2 ** (n / 2)
-     noise = 0.2 * torchb.ones([2, n], dtype="float32")
-     l = layer(state, noise)
-     lsum = torchb.sum(l)
-     print(l)
-     lsum.backward()
-     for p in layer.parameters():
-         print(p.grad)
-
-
- @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb"), lf("torchb")])
- def test_torchnn_hardware(backend):
-     n = 2
-
-     def qf(inputs, param):
-         inputs = tc.backend.convert_to_tensor(tc.get_backend("pytorch").numpy(inputs))
-         param = tc.backend.convert_to_tensor(tc.get_backend("pytorch").numpy(param))
-
-         c = tc.Circuit(n)
-         c.rx(0, theta=inputs[0])
-         c.rx(1, theta=inputs[1])
-         c.h(1)
-         c.rzz(0, 1, theta=param[0])
-         r = tc.backend.stack([c.expectation_ps(z=[i]) for i in range(n)])
-
-         r = tc.get_backend("pytorch").convert_to_tensor(tc.backend.numpy(r))
-         return torch.real(r)
-
-     ql = tc.torchnn.HardwareNet(qf, [1])
-     qnet = torch.nn.Sequential(ql, torch.nn.Linear(2, 1))
-     print(qnet(torch.ones([5, 2])))
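
For context, the removed torchnn tests exercised tc.TorchLayer, the wrapper that exposes a TensorCircuit quantum function as a torch.nn.Module with trainable weights. A minimal sketch of that usage pattern, condensed from the deleted test_quantumnet above (assuming the pytorch backend, hence use_interface=False as in the deleted code; keyword names are copied from it):

    import torch
    import tensorcircuit as tc

    tc.set_backend("pytorch")  # run the circuit natively on the torch backend
    n, nlayers = 6, 2

    def qpred(x, weights):
        # data-encoding rx layer followed by entangling + trainable layers
        c = tc.Circuit(n)
        for i in range(n):
            c.rx(i, theta=x[i])
        for j in range(nlayers):
            for i in range(n - 1):
                c.cnot(i, i + 1)
            for i in range(n):
                c.rx(i, theta=weights[2 * j, i])
                c.ry(i, theta=weights[2 * j + 1, i])
        return tc.backend.real(
            tc.backend.stack([c.expectation_ps(x=[i]) for i in range(n)])
        )

    # weights_shape determines the trainable parameter tensor created by the layer
    ql = tc.TorchLayer(qpred, weights_shape=[2 * nlayers, n], use_interface=False)
    out = ql(torch.ones([3, n]))  # batched inputs, output shape [3, n]
    out.sum().backward()          # gradients land in ql.parameters()
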
tests/test_van.py DELETED
@@ -1,102 +0,0 @@
- import sys
- import os
- import itertools
-
- thisfile = os.path.abspath(__file__)
- modulepath = os.path.dirname(os.path.dirname(thisfile))
-
- sys.path.insert(0, modulepath)
-
- import numpy as np
- import tensorflow as tf
-
-
- from tensorcircuit.applications.van import MaskedLinear, ResidualBlock, MADE, PixelCNN
-
-
- def test_masklinear():
-     ml = MaskedLinear(5, 10, 3, mask=tf.zeros([10, 3, 5, 3]))
-     ml.set_weights([tf.ones([10, 3, 5, 3]), tf.ones([10, 3])])
-     tf.debugging.assert_near(ml(tf.ones([1, 5, 3])), tf.ones([1, 10, 3]))
-
-     mask = np.zeros([10, 3, 5, 3])
-     mask[3, 2, 1, 0] = 1.0
-     ml = MaskedLinear(5, 10, 3, mask=tf.constant(mask, dtype=tf.float32))
-     ml.set_weights([tf.ones([10, 3, 5, 3]), tf.zeros([10, 3])])
-     assert tf.reduce_sum(ml(tf.ones([5, 3]))[3, :]) == 1.0
-
-     w = tf.random.uniform(shape=[10, 3, 5, 3])
-     b = tf.random.uniform(shape=[10, 3])
-     w_m = tf.reshape(w, [30, 15])
-     b_m = tf.reshape(b, [30, 1])
-     inputs = tf.ones([5, 3])
-     inputs_m = tf.reshape(inputs, [15, 1])
-     r_m = w_m @ inputs_m + b_m
-     r = tf.reshape(r_m, [10, 3])
-     ml = MaskedLinear(5, 10, 3)
-     ml.set_weights([w, b])
-     tf.debugging.assert_near(ml(inputs), r)
-
-
- def test_residual_block():
-     dense1 = tf.keras.layers.Dense(10, use_bias=False)
-     dense1.build([1, 1])
-     dense1.set_weights([np.ones([1, 10])])
-     dense2 = tf.keras.layers.Dense(1, use_bias=False)
-     dense2.build([1, 10])
-     dense2.set_weights([np.ones([10, 1])])
-     m = ResidualBlock([dense1, dense2])
-     assert m(tf.ones([1, 1])) == 11.0
-
-
- def test_made():
-     import itertools
-
-     m = MADE(2, 2, 6, 3, 3, nonmerge=False)
-     l = []
-     for i in itertools.product(*[list(range(3)) for _ in range(2)]):
-         l.append(list(i))
-     basis = tf.constant(l, dtype=tf.int32)
-     print(basis)
-     ptot = tf.reduce_sum(tf.exp(m.log_prob(tf.one_hot(basis, depth=3))))
-     np.testing.assert_allclose(ptot.numpy(), 1.0, atol=1e-5)
-
-     s, logp = m.sample(10)
-     print(logp)
-     assert s.shape == (10, 2, 3)
-
-
- def test_made_fit_peak():
-     opt = tf.optimizers.Adam(learning_rate=0.01)
-     m = MADE(5, 5, 4, 3, 2)
-     for step in range(100):
-         with tf.GradientTape() as t:
-             loss = -tf.reduce_sum(
-                 m.log_prob(
-                     tf.one_hot(
-                         [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 1, 1, 0]], depth=3
-                     )
-                 )
-             )
-         gr = t.gradient(loss, m.variables)
-         if step % 20 == 0:
-             print(
-                 tf.exp(
-                     m.log_prob(
-                         tf.one_hot(
-                             [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 1, 1, 0]], depth=3
-                         )
-                     )
-                 ).numpy()
-             )
-         opt.apply_gradients(zip(gr, m.variables))
-
-
- def test_pixelcnn():
-     m = PixelCNN(3, 5, 8)
-     l = []
-     for i in itertools.product(*[list(range(3)) for _ in range(4)]):
-         l.append(list(i))
-     basis = tf.constant(tf.reshape(l, [-1, 2, 2]), dtype=tf.int32)
-     ptot = tf.reduce_sum(tf.exp(m.log_prob(tf.one_hot(basis, depth=3))))
-     np.testing.assert_allclose(ptot.numpy(), 1.0, atol=1e-5)
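
For context, the removed van tests checked the autoregressive models in tensorcircuit.applications.van chiefly by verifying that their probabilities are normalized over the full computational basis. A minimal sketch of that check for MADE, condensed from the deleted test_made above (constructor arguments copied verbatim from the deleted code):

    import itertools

    import numpy as np
    import tensorflow as tf
    from tensorcircuit.applications.van import MADE

    m = MADE(2, 2, 6, 3, 3, nonmerge=False)

    # enumerate all 3**2 configurations of two 3-state variables
    basis = tf.constant(
        [list(i) for i in itertools.product(range(3), repeat=2)], dtype=tf.int32
    )
    # an autoregressive model's probabilities must sum to one over the basis
    ptot = tf.reduce_sum(tf.exp(m.log_prob(tf.one_hot(basis, depth=3))))
    np.testing.assert_allclose(ptot.numpy(), 1.0, atol=1e-5)

    # sampling returns one-hot configurations together with their log-probabilities
    s, logp = m.sample(10)
    assert s.shape == (10, 2, 3)
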