tensorcircuit-nightly 1.2.0.dev20250326__py3-none-any.whl → 1.4.0.dev20251128__py3-none-any.whl
This diff represents the content of publicly available package versions as released to a supported registry; it is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- tensorcircuit/__init__.py +5 -1
- tensorcircuit/abstractcircuit.py +4 -0
- tensorcircuit/analogcircuit.py +413 -0
- tensorcircuit/applications/layers.py +1 -1
- tensorcircuit/applications/van.py +1 -1
- tensorcircuit/backends/abstract_backend.py +312 -5
- tensorcircuit/backends/cupy_backend.py +3 -1
- tensorcircuit/backends/jax_backend.py +100 -4
- tensorcircuit/backends/jax_ops.py +108 -0
- tensorcircuit/backends/numpy_backend.py +49 -3
- tensorcircuit/backends/pytorch_backend.py +92 -3
- tensorcircuit/backends/tensorflow_backend.py +102 -3
- tensorcircuit/basecircuit.py +157 -98
- tensorcircuit/circuit.py +115 -57
- tensorcircuit/cloud/local.py +1 -1
- tensorcircuit/cloud/quafu_provider.py +1 -1
- tensorcircuit/cloud/tencent.py +1 -1
- tensorcircuit/compiler/simple_compiler.py +2 -2
- tensorcircuit/cons.py +105 -23
- tensorcircuit/densitymatrix.py +16 -11
- tensorcircuit/experimental.py +733 -153
- tensorcircuit/fgs.py +254 -73
- tensorcircuit/gates.py +66 -22
- tensorcircuit/interfaces/jax.py +5 -3
- tensorcircuit/interfaces/tensortrans.py +6 -2
- tensorcircuit/interfaces/torch.py +14 -4
- tensorcircuit/keras.py +3 -3
- tensorcircuit/mpscircuit.py +154 -65
- tensorcircuit/quantum.py +698 -134
- tensorcircuit/quditcircuit.py +733 -0
- tensorcircuit/quditgates.py +618 -0
- tensorcircuit/results/counts.py +131 -18
- tensorcircuit/results/readout_mitigation.py +4 -1
- tensorcircuit/shadows.py +1 -1
- tensorcircuit/simplify.py +3 -1
- tensorcircuit/stabilizercircuit.py +29 -17
- tensorcircuit/templates/__init__.py +2 -0
- tensorcircuit/templates/blocks.py +2 -2
- tensorcircuit/templates/hamiltonians.py +174 -0
- tensorcircuit/templates/lattice.py +1789 -0
- tensorcircuit/timeevol.py +896 -0
- tensorcircuit/translation.py +10 -3
- tensorcircuit/utils.py +7 -0
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/METADATA +66 -29
- tensorcircuit_nightly-1.4.0.dev20251128.dist-info/RECORD +96 -0
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/WHEEL +1 -1
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/top_level.txt +0 -1
- tensorcircuit_nightly-1.2.0.dev20250326.dist-info/RECORD +0 -118
- tests/__init__.py +0 -0
- tests/conftest.py +0 -67
- tests/test_backends.py +0 -1035
- tests/test_calibrating.py +0 -149
- tests/test_channels.py +0 -409
- tests/test_circuit.py +0 -1699
- tests/test_cloud.py +0 -219
- tests/test_compiler.py +0 -147
- tests/test_dmcircuit.py +0 -555
- tests/test_ensemble.py +0 -72
- tests/test_fgs.py +0 -310
- tests/test_gates.py +0 -156
- tests/test_interfaces.py +0 -562
- tests/test_keras.py +0 -160
- tests/test_miscs.py +0 -282
- tests/test_mpscircuit.py +0 -341
- tests/test_noisemodel.py +0 -156
- tests/test_qaoa.py +0 -86
- tests/test_qem.py +0 -152
- tests/test_quantum.py +0 -549
- tests/test_quantum_attr.py +0 -42
- tests/test_results.py +0 -380
- tests/test_shadows.py +0 -160
- tests/test_simplify.py +0 -46
- tests/test_stabilizer.py +0 -217
- tests/test_templates.py +0 -218
- tests/test_torchnn.py +0 -99
- tests/test_van.py +0 -102
- {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/licenses/LICENSE +0 -0
tensorcircuit/gates.py
CHANGED
@@ -34,6 +34,12 @@ one_state = np.array([0.0, 1.0], dtype=npdtype)
 plus_state = 1.0 / np.sqrt(2) * (zero_state + one_state)
 minus_state = 1.0 / np.sqrt(2) * (zero_state - one_state)
 
+# Common elements as np.ndarray objects
+_i00 = np.array([[1.0, 0.0], [0.0, 0.0]])
+_i01 = np.array([[0.0, 1.0], [0.0, 0.0]])
+_i10 = np.array([[0.0, 0.0], [1.0, 0.0]])
+_i11 = np.array([[0.0, 0.0], [0.0, 1.0]])
+
 # Common single qubit gates as np.ndarray objects
 _h_matrix = 1 / np.sqrt(2) * np.array([[1.0, 1.0], [1.0, -1.0]])
 _i_matrix = np.array([[1.0, 0.0], [0.0, 1.0]])
@@ -229,7 +235,7 @@ def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> Any
     # TODO(@YHPeter): fix __doc__ for same function with different names
 
     l = []
-    if not dtype:
+    if dtype is None:
         dtype = dtypestr
     for n in num:
         if not backend.is_tensor(n):
@@ -245,7 +251,7 @@ array_to_tensor = num_to_tensor
 
 
 def gate_wrapper(m: Tensor, n: Optional[str] = None) -> Gate:
-    if not n:
+    if n is None:
         n = "unknowngate"
     m = m.astype(npdtype)
     return Gate(deepcopy(m), name=n)
@@ -255,7 +261,7 @@ class GateF:
     def __init__(
         self, m: Tensor, n: Optional[str] = None, ctrl: Optional[List[int]] = None
     ):
-        if not n:
+        if n is None:
            n = "unknowngate"
        self.m = m
        self.n = n
@@ -310,7 +316,7 @@ class GateF:
 
            return Gate(cu, name="c" + self.n)
 
-        if not self.ctrl:
+        if self.ctrl is None:
            ctrl = [1]
        else:
            ctrl = [1] + self.ctrl
@@ -330,7 +336,7 @@ class GateF:
            # TODO(@refraction-ray): ctrl convention to be finally determined
            return Gate(ocu, name="o" + self.n)
 
-        if not self.ctrl:
+        if self.ctrl is None:
            ctrl = [0]
        else:
            ctrl = [0] + self.ctrl
@@ -349,7 +355,7 @@ class GateVF(GateF):
        n: Optional[str] = None,
        ctrl: Optional[List[int]] = None,
    ):
-        if not n:
+        if n is None:
            n = "unknowngate"
        self.f = f
        self.n = n
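A note on the `if not …:` to `if … is None:` rewrites above (the removed conditionals are truncated in the upstream view; `if not …:` is the presumed original): a truthiness test also fires on empty strings and empty lists, so the old form treated an empty gate name or an explicit empty `ctrl` list as missing. A plain-Python illustration, not part of the diff:

    # `not v` conflates None with every other falsy value:
    for v in (None, "", []):
        print(not v, v is None)
    # -> True True
    # -> True False   (an empty name "" is no longer swallowed)
    # -> True False   (an explicit empty ctrl list [] is now preserved)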
@@ -483,7 +489,7 @@ def phase_gate(theta: float = 0) -> Gate:
     :rtype: Gate
     """
     theta = array_to_tensor(theta)
-    i00, i11 = array_to_tensor(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]]))
+    i00, i11 = array_to_tensor(_i00, _i11)
     unitary = i00 + backend.exp(1.0j * theta) * i11
     return Gate(unitary)
 
@@ -512,7 +518,7 @@ def get_u_parameter(m: Tensor) -> Tuple[float, float, float]:
     return theta, phi, lbd
 
 
-def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate:
+def u_gate(theta: float = 0.0, phi: float = 0.0, lbd: float = 0.0) -> Gate:
     r"""
     IBMQ U gate following the converntion of OpenQASM3.0.
     See `OpenQASM doc <https://openqasm.com/language/gates.html#built-in-gates>`_
@@ -533,12 +539,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate:
     :rtype: Gate
     """
     theta, phi, lbd = array_to_tensor(theta, phi, lbd)
-    i00, i01, i10, i11 = array_to_tensor(
-        np.array([[1, 0], [0, 0]]),
-        np.array([[0, 1], [0, 0]]),
-        np.array([[0, 0], [1, 0]]),
-        np.array([[0, 0], [0, 1]]),
-    )
+    i00, i01, i10, i11 = array_to_tensor(_i00, _i01, _i10, _i11)
     unitary = (
         backend.cos(theta / 2) * i00
         - backend.exp(1.0j * lbd) * backend.sin(theta / 2) * i01
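Written out, the unitary assembled above from the `_i00` … `_i11` elements is the standard OpenQASM 3.0 U gate, U(θ, φ, λ) = [[cos(θ/2), −e^{iλ}·sin(θ/2)], [e^{iφ}·sin(θ/2), e^{i(φ+λ)}·cos(θ/2)]]. A quick numeric check against that definition, a sketch assuming the default numpy backend (not part of the diff):

    import numpy as np
    import tensorcircuit as tc

    theta, phi, lbd = 0.3, 0.5, 0.7
    expected = np.array(
        [
            [np.cos(theta / 2), -np.exp(1j * lbd) * np.sin(theta / 2)],
            [
                np.exp(1j * phi) * np.sin(theta / 2),
                np.exp(1j * (phi + lbd)) * np.cos(theta / 2),
            ],
        ]
    )
    got = tc.gates.u_gate(theta=theta, phi=phi, lbd=lbd).tensor
    np.testing.assert_allclose(got, expected, atol=1e-6)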
@@ -548,7 +549,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate:
     return Gate(unitary)
 
 
-def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate:
+def r_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate:
     r"""
     General single qubit rotation gate
 
@@ -582,7 +583,7 @@ def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate:
 # r = r_gate
 
 
-def rx_gate(theta: float = 0) -> Gate:
+def rx_gate(theta: float = 0.0) -> Gate:
     r"""
     Rotation gate along :math:`x` axis.
 
@@ -603,7 +604,7 @@ def rx_gate(theta: float = 0) -> Gate:
 # rx = rx_gate
 
 
-def ry_gate(theta: float = 0) -> Gate:
+def ry_gate(theta: float = 0.0) -> Gate:
     r"""
     Rotation gate along :math:`y` axis.
 
@@ -624,7 +625,7 @@ def ry_gate(theta: float = 0) -> Gate:
 # ry = ry_gate
 
 
-def rz_gate(theta: float = 0) -> Gate:
+def rz_gate(theta: float = 0.0) -> Gate:
     r"""
     Rotation gate along :math:`z` axis.
 
@@ -645,7 +646,7 @@ def rz_gate(theta: float = 0) -> Gate:
 # rz = rz_gate
 
 
-def rgate_theoretical(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate:
+def rgate_theoretical(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate:
     r"""
     Rotation gate implemented by matrix exponential. The output is the same as `rgate`.
 
@@ -723,7 +724,7 @@ def iswap_gate(theta: float = 1.0) -> Gate:
 # iswap = iswap_gate
 
 
-def cr_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate:
+def cr_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate:
     r"""
     Controlled rotation gate. When the control qubit is 1, `rgate` is applied to the target qubit.
 
|
|
|
775
776
|
return Gate(deepcopy(unitary), name="R2Q")
|
|
776
777
|
|
|
777
778
|
|
|
778
|
-
def any_gate(unitary: Tensor, name: str = "any") -> Gate:
|
|
779
|
+
def any_gate(unitary: Tensor, name: str = "any", dim: Optional[int] = None) -> Gate:
|
|
779
780
|
"""
|
|
780
781
|
Note one should provide the gate with properly reshaped.
|
|
781
782
|
|
|
@@ -783,6 +784,8 @@ def any_gate(unitary: Tensor, name: str = "any") -> Gate:
|
|
|
783
784
|
:type unitary: Tensor
|
|
784
785
|
:param name: The name of the gate.
|
|
785
786
|
:type name: str
|
|
787
|
+
:param dim: The dimension of the gate.
|
|
788
|
+
:type dim: int
|
|
786
789
|
:return: the resulted gate
|
|
787
790
|
:rtype: Gate
|
|
788
791
|
"""
|
|
@@ -791,7 +794,10 @@ def any_gate(unitary: Tensor, name: str = "any") -> Gate:
|
|
|
791
794
|
unitary.tensor = backend.cast(unitary.tensor, dtypestr)
|
|
792
795
|
return unitary
|
|
793
796
|
unitary = backend.cast(unitary, dtypestr)
|
|
794
|
-
|
|
797
|
+
if dim is None or dim == 2:
|
|
798
|
+
unitary = backend.reshape2(unitary)
|
|
799
|
+
else:
|
|
800
|
+
unitary = backend.reshaped(unitary, dim)
|
|
795
801
|
# nleg = int(np.log2(backend.sizen(unitary)))
|
|
796
802
|
# unitary = backend.reshape(unitary, [2 for _ in range(nleg)])
|
|
797
803
|
return Gate(unitary, name=name)
|
|
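With the new `dim` argument, `any_gate` can reshape a qudit unitary into the rank-2k tensor a circuit expects, which lines up with the new `quditcircuit.py`/`quditgates.py` modules in this release. A minimal sketch (the qutrit matrix below is illustrative, not from the package):

    import numpy as np
    import tensorcircuit as tc

    # single-qutrit shift gate X3: |j> -> |j+1 mod 3>
    x3 = np.roll(np.eye(3), 1, axis=0)
    g3 = tc.gates.any_gate(x3, name="x3", dim=3)  # goes through backend.reshaped(..., 3)

    # qubit case (dim=None or dim=2) is unchanged:
    cnot = np.eye(4)[[0, 1, 3, 2]]
    g2 = tc.gates.any_gate(cnot)  # reshaped to shape (2, 2, 2, 2) via backend.reshape2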
@@ -864,6 +870,43 @@ def exponential_gate_unity(
     return Gate(mat, name="exp1-" + name)
 
 
+def su4_gate(theta: Tensor, name: str = "su(4)") -> Gate:
+    r"""
+    Two-qubit general SU(4) gate.
+
+    :param theta: the angle tensor (15 components) of the gate.
+    :type theta: Tensor
+    :param name: the name of the gate.
+    :type name: str
+    :return: a gate object.
+    :rtype: Gate
+    """
+    theta = num_to_tensor(theta)
+    pauli_ops = array_to_tensor(
+        _ix_matrix,
+        _iy_matrix,
+        _iz_matrix,
+        _xi_matrix,
+        _xx_matrix,
+        _xy_matrix,
+        _xz_matrix,
+        _yi_matrix,
+        _yx_matrix,
+        _yy_matrix,
+        _yz_matrix,
+        _zi_matrix,
+        _zx_matrix,
+        _zy_matrix,
+        _zz_matrix,
+    )
+    generator = backend.sum(
+        backend.stack([theta[i] * pauli_ops[i] for i in range(15)]), axis=0
+    )
+    mat = backend.expm(-1j * generator)
+    mat = backend.reshape2(mat)
+    return Gate(mat, name=name)
+
+
 exp1_gate = exponential_gate_unity
 # exp1 = exponential_gate_unity
 rzz_gate = partial(exp1_gate, unitary=_zz_matrix, half=True)
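The new `su4_gate` exponentiates a weighted sum of the 15 non-identity two-qubit Pauli strings, so a 15-component angle tensor parameterizes a general SU(4) element up to global phase. A usage sketch with arbitrary angles; the circuit-level `c.su4(...)` call assumes the `meta_vgate` registration shown in the next hunk:

    import numpy as np
    import tensorcircuit as tc

    theta = np.random.uniform(0, 2 * np.pi, size=15)  # one angle per Pauli string
    g = tc.gates.su4_gate(theta)                      # Gate with a (2, 2, 2, 2) tensor

    c = tc.Circuit(2)
    c.su4(0, 1, theta=theta)
    print(c.expectation_ps(z=[0]))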
@@ -968,6 +1011,7 @@ def meta_vgate() -> None:
         "rzz",
         "rxx",
         "ryy",
+        "su4",
     ]:
         for funcname in [f, f + "gate"]:
             setattr(thismodule, funcname, GateVF(getattr(thismodule, f + "_gate"), f))
tensorcircuit/interfaces/jax.py
CHANGED
@@ -5,9 +5,6 @@ Interface wraps quantum function as a jax function
 from typing import Any, Callable, Tuple, Optional, Union, Sequence
 from functools import wraps, partial
 
-import jax
-from jax import custom_vjp
-
 from ..cons import backend
 from .tensortrans import general_args_to_backend
 
@@ -22,6 +19,8 @@ def jax_wrapper(
     ] = None,
     output_dtype: Optional[Union[Any, Sequence[Any]]] = None,
 ) -> Callable[..., Any]:
+    import jax
+
     @wraps(fun)
     def fun_jax(*x: Any) -> Any:
         def wrapped_fun(*args: Any) -> Any:
@@ -129,6 +128,9 @@ def create_jax_function(
     output_shape: Optional[Union[Tuple[int, ...], Tuple[()]]] = None,
     output_dtype: Optional[Any] = None,
 ) -> Callable[..., Any]:
+    import jax
+    from jax import custom_vjp
+
     if jit:
         fun = backend.jit(fun)
 
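Both hunks defer the jax imports from module scope into the functions that need them, so the module can be imported without jax installed; the dependency is only required when a wrapper is actually built. The pattern in isolation (a generic sketch, not the package code):

    def jax_only_feature(x):
        # deferred import: the dependency is only required (and paid for)
        # when this function is actually called
        import jax

        return jax.jit(lambda y: y * 2)(x)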
tensorcircuit/interfaces/tensortrans.py
CHANGED
 
@@ -132,13 +132,17 @@ def general_args_to_backend(
         target_backend = backend
     elif isinstance(target_backend, str):
         target_backend = get_backend(target_backend)
+    try:
+        t = backend.tree_map(target_backend.from_dlpack, caps)
+    except TypeError:
+        t = backend.tree_map(target_backend.from_dlpack, args)
+
     if dtype is None:
-        return backend.tree_map(target_backend.from_dlpack, caps)
+        return t
     if isinstance(dtype, str):
         leaves, treedef = backend.tree_flatten(args)
         dtype = [dtype for _ in range(len(leaves))]
         dtype = backend.tree_unflatten(treedef, dtype)
-    t = backend.tree_map(target_backend.from_dlpack, caps)
     t = backend.tree_map(target_backend.cast, t, dtype)
     return t
 
tensorcircuit/interfaces/torch.py
CHANGED
 
@@ -69,12 +69,14 @@ def torch_interface(
         @staticmethod
         def forward(ctx: Any, *x: Any) -> Any:  # type: ignore
             # ctx.xdtype = [xi.dtype for xi in x]
-            ctx.xdtype = backend.tree_map(lambda s: s.dtype, x)
+            ctx.save_for_backward(*x)
+            x_detached = backend.tree_map(lambda s: s.detach(), x)
+            ctx.xdtype = backend.tree_map(lambda s: s.dtype, x_detached)
             # (x, )
             if len(ctx.xdtype) == 1:
                 ctx.xdtype = ctx.xdtype[0]
-            ctx.device = (backend.tree_flatten(x)[0][0]).device
-            x = general_args_to_backend(x, enable_dlpack=enable_dlpack)
+            ctx.device = (backend.tree_flatten(x_detached)[0][0]).device
+            x = general_args_to_backend(x_detached, enable_dlpack=enable_dlpack)
             y = fun(*x)
             ctx.ydtype = backend.tree_map(lambda s: s.dtype, y)
             if len(x) == 1:
@@ -88,6 +90,9 @@ def torch_interface(
 
         @staticmethod
         def backward(ctx: Any, *grad_y: Any) -> Any:
+            x = ctx.saved_tensors
+            x_detached = backend.tree_map(lambda s: s.detach(), x)
+            x_backend = general_args_to_backend(x_detached, enable_dlpack=enable_dlpack)
             if len(grad_y) == 1:
                 grad_y = grad_y[0]
             grad_y = backend.tree_map(lambda s: s.contiguous(), grad_y)
@@ -96,7 +101,12 @@ def torch_interface(
             )
             # grad_y = general_args_to_numpy(grad_y)
             # grad_y = numpy_args_to_backend(grad_y, dtype=ctx.ydtype)  # backend.dtype
-            _, g = vjp_fun(ctx.x, grad_y)
+            if len(x_backend) == 1:
+                x_backend_for_vjp = x_backend[0]
+            else:
+                x_backend_for_vjp = x_backend
+
+            _, g = vjp_fun(x_backend_for_vjp, grad_y)
             # a redundency due to current vjp API
 
             r = general_args_to_backend(
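The rework above moves input caching onto `ctx.save_for_backward` (the mechanism PyTorch expects for autograd bookkeeping) and feeds detached copies into the backend conversion, recomputing the backend-side inputs inside `backward` from the saved tensors. The calling convention is unchanged; a usage sketch with arbitrary values:

    import torch
    import tensorcircuit as tc

    tc.set_backend("tensorflow")  # any non-torch ML backend

    def f(params):
        c = tc.Circuit(2)
        c.rx(0, theta=params[0])
        c.cnot(0, 1)
        c.rz(1, theta=params[1])
        return tc.backend.real(c.expectation_ps(z=[1]))

    f_torch = tc.interfaces.torch_interface(f, jit=True)

    params = torch.ones(2, requires_grad=True)
    loss = f_torch(params)
    loss.backward()
    print(params.grad)  # gradients flow back through the saved inputs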
tensorcircuit/keras.py
CHANGED
@@ -24,7 +24,7 @@ class QuantumLayer(Layer):  # type: ignore
         initializer: Union[Text, Sequence[Text]] = "glorot_uniform",
         constraint: Optional[Union[Text, Sequence[Text]]] = None,
         regularizer: Optional[Union[Text, Sequence[Text]]] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         """
         `QuantumLayer` wraps the quantum function `f` as a `keras.Layer`
@@ -103,7 +103,7 @@ class QuantumLayer(Layer):  # type: ignore
         inputs: tf.Tensor,
         training: Optional[bool] = None,
         mask: Optional[tf.Tensor] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> tf.Tensor:
         # input_shape = list(inputs.shape)
         # inputs = tf.reshape(inputs, (-1, input_shape[-1]))
@@ -154,7 +154,7 @@ class HardwareLayer(QuantumLayer):
         inputs: tf.Tensor,
         training: Optional[bool] = None,
         mask: Optional[tf.Tensor] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> tf.Tensor:
         if inputs is None:  # not possible
             result = self.f(*self.pqc_weights, **kwargs)