tensorcircuit-nightly 1.3.0.dev20250729__py3-none-any.whl → 1.3.0.dev20250730__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tensorcircuit-nightly might be problematic.
- tensorcircuit/__init__.py +1 -1
- tensorcircuit/backends/abstract_backend.py +14 -2
- tensorcircuit/backends/numpy_backend.py +1 -0
- tensorcircuit/backends/tensorflow_backend.py +4 -1
- tensorcircuit/timeevol.py +280 -32
- {tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/METADATA +1 -1
- {tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/RECORD +11 -11
- tests/test_timeevol.py +224 -8
- {tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/WHEEL +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/licenses/LICENSE +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/top_level.txt +0 -0
tensorcircuit/__init__.py
CHANGED
tensorcircuit/backends/abstract_backend.py
CHANGED

@@ -1389,9 +1389,21 @@ class ExtendedBackend:
         :rtype: Tensor
         """
         carry = init
-        for x in xs:
-            carry = f(carry, x)
+        # Check if `xs` is a PyTree (tuple or list) of arrays.
+        if isinstance(xs, (tuple, list)):
+            for x_slice_tuple in zip(*xs):
+                # x_slice_tuple will be (k_elems[i], j_elems[i]) at each step.
+                carry = f(carry, x_slice_tuple)
+        else:
+            # If xs is a single array, iterate normally.
+            for x in xs:
+                carry = f(carry, x)
+
         return carry
+        # carry = init
+        # for x in xs:
+        #     carry = f(carry, x)
+        # return carry

     def stop_gradient(self: Any, a: Tensor) -> Tensor:
         """
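For orientation, a minimal self-contained sketch (not part of the package, plain Python/NumPy only) of the fallback behaviour added above: when xs is a tuple or list of arrays, the leaves are zipped and one slice per leaf is fed to the step function at every iteration.

    import numpy as np

    def scan_fallback(f, xs, init):
        # mirrors the loop added above: tuple/list xs is treated as a PyTree of arrays
        carry = init
        if isinstance(xs, (tuple, list)):
            for x_slice_tuple in zip(*xs):
                carry = f(carry, x_slice_tuple)
        else:
            for x in xs:
                carry = f(carry, x)
        return carry

    k_elems = np.arange(4)
    j_elems = np.full([4], 2)
    # the step function receives (k_elems[i], j_elems[i]) at each iteration
    print(scan_fallback(lambda c, kj: c + kj[0] * kj[1], (k_elems, j_elems), 0))  # 12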
tensorcircuit/backends/numpy_backend.py
CHANGED

@@ -200,6 +200,7 @@ class NumpyBackend(numpy_backend.NumPyBackend, ExtendedBackend):  # type: ignore
         return softmax(a, axis=axis)

     def onehot(self, a: Tensor, num: int) -> Tensor:
+        a = np.asarray(a)
         res = np.eye(num)[a.reshape([-1])]
         return res.reshape(list(a.shape) + [num])
         # https://stackoverflow.com/questions/38592324/one-hot-encoding-using-numpy
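A small NumPy-only sketch (the standalone helper name is illustrative, not the package API) of why the added np.asarray matters: a plain Python int has no .reshape attribute, so the one-hot encoding would fail for scalar inputs without the conversion.

    import numpy as np

    def onehot(a, num):
        a = np.asarray(a)  # accepts Python ints as well as arrays
        res = np.eye(num)[a.reshape([-1])]
        return res.reshape(list(a.shape) + [num])

    print(onehot(0, 4))                 # scalar index -> shape (4,)
    print(onehot(np.array([0, 2]), 4))  # vector of indices -> shape (2, 4)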
tensorcircuit/backends/tensorflow_backend.py
CHANGED

@@ -719,7 +719,10 @@ class TensorFlowBackend(tensorflow_backend.TensorFlowBackend, ExtendedBackend):
     def scan(
         self, f: Callable[[Tensor, Tensor], Tensor], xs: Tensor, init: Tensor
     ) -> Tensor:
-        return tf.scan(f, xs, init)[-1]
+        stacked_results = tf.scan(f, xs, init)
+        final_state = tf.nest.map_structure(lambda x: x[-1], stacked_results)
+        return final_state
+        # return tf.scan(f, xs, init)[-1]

     def device(self, a: Tensor) -> str:
         dev = a.device
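A hedged illustration (assuming TensorFlow is installed) of why this change is needed: tf.scan stacks the carry for every step, and when the carry is a tuple the `[-1]` index has to be applied per leaf via tf.nest.map_structure rather than on the result as a whole.

    import tensorflow as tf

    xs = tf.constant([1.0, 2.0, 3.0])
    init = (tf.constant(0.0), tf.constant(1.0))  # tuple carry: (running sum, running product)

    stacked = tf.scan(lambda carry, x: (carry[0] + x, carry[1] * x), xs, init)
    final_state = tf.nest.map_structure(lambda t: t[-1], stacked)
    print(final_state)  # (6.0, 6.0)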
tensorcircuit/timeevol.py
CHANGED
@@ -3,14 +3,165 @@ Analog time evolution engines
 """

 from typing import Any, Tuple, Optional, Callable, List, Sequence
+from functools import partial
+
+import numpy as np

 from .cons import backend, dtypestr, rdtypestr, contractor
 from .gates import Gate
+from .utils import arg_alias

 Tensor = Any
 Circuit = Any


+def lanczos_iteration_scan(
+    hamiltonian: Any, initial_vector: Any, subspace_dimension: int
+) -> Tuple[Any, Any]:
+    """
+    Use Lanczos algorithm to construct orthogonal basis and projected Hamiltonian
+    of Krylov subspace, using `tc.backend.scan` for JIT compatibility.
+
+    :param hamiltonian: Sparse or dense Hamiltonian matrix
+    :type hamiltonian: Tensor
+    :param initial_vector: Initial quantum state vector
+    :type initial_vector: Tensor
+    :param subspace_dimension: Dimension of Krylov subspace
+    :type subspace_dimension: int
+    :return: Tuple containing (basis matrix, projected Hamiltonian)
+    :rtype: Tuple[Tensor, Tensor]
+    """
+    state_size = backend.shape_tuple(initial_vector)[0]
+
+    # Main scan body for the outer loop (iterating j)
+    def lanczos_step(carry: Tuple[Any, ...], j: int) -> Tuple[Any, ...]:
+        v, basis, alphas, betas = carry
+
+        if backend.is_sparse(hamiltonian):
+            w = backend.sparse_dense_matmul(hamiltonian, v)
+        else:
+            w = backend.matvec(hamiltonian, v)
+
+        alpha = backend.real(backend.sum(backend.conj(v) * w))
+        w = w - backend.cast(alpha, dtypestr) * v
+
+        # Inner scan for re-orthogonalization (iterating k)
+        # def ortho_step(inner_carry: Tuple[Any, Any], k: int) -> Tuple[Any, Any]:
+        #     w_carry, j_val = inner_carry
+
+        #     def do_projection() -> Any:
+        #         # `basis` is available here through closure
+        #         v_k = basis[:, k]
+        #         projection = backend.sum(backend.conj(v_k) * w_carry)
+        #         return w_carry - projection * v_k
+
+        #     def do_nothing() -> Any:
+        #         return w_carry
+
+        #     # Orthogonalize against v_0, ..., v_j
+        #     w_new = backend.cond(k <= j_val, do_projection, do_nothing)
+        #     return (w_new, j_val)  # Return the new carry for the inner loop
+
+        # # Pass `j` into the inner scan's carry
+        # inner_init_carry = (w, j)
+        # final_inner_carry = backend.scan(
+        #     ortho_step, backend.arange(subspace_dimension), inner_init_carry
+        # )
+        # w_ortho = final_inner_carry[0]

+        def ortho_step(w_carry: Any, elems_tuple: Tuple[Any, Any]) -> Any:
+            k, j_from_elems = elems_tuple
+
+            def do_projection() -> Any:
+                v_k = basis[:, k]
+                projection = backend.sum(backend.conj(v_k) * w_carry)
+                return w_carry - projection * v_k
+
+            def do_nothing() -> Any:
+                return backend.cast(w_carry, dtype=dtypestr)
+
+            w_new = backend.cond(k <= j_from_elems, do_projection, do_nothing)
+            return w_new
+
+        k_elems = backend.arange(subspace_dimension)
+        j_elems = backend.tile(backend.reshape(j, [1]), [subspace_dimension])
+        inner_elems = (k_elems, j_elems)
+        w_ortho = backend.scan(ortho_step, inner_elems, w)
+
+        beta = backend.norm(w_ortho)
+        beta = backend.real(beta)
+
+        # Update alphas and betas arrays
+        new_alphas = backend.scatter(
+            alphas, backend.reshape(j, [1, 1]), backend.reshape(alpha, [1])
+        )
+        new_betas = backend.scatter(
+            betas, backend.reshape(j, [1, 1]), backend.reshape(beta, [1])
+        )
+
+        def update_state_fn() -> Tuple[Any, Any]:
+            epsilon = 1e-15
+            next_v = w_ortho / backend.cast(beta + epsilon, dtypestr)
+
+            one_hot_update = backend.onehot(j + 1, subspace_dimension)
+            one_hot_update = backend.cast(one_hot_update, dtype=dtypestr)
+
+            # Create a mask to update only the (j+1)-th column
+            mask = 1.0 - backend.reshape(one_hot_update, [1, subspace_dimension])
+            new_basis = basis * mask + backend.reshape(
+                next_v, [-1, 1]
+            ) * backend.reshape(one_hot_update, [1, subspace_dimension])
+
+            return next_v, new_basis
+
+        def keep_state_fn() -> Tuple[Any, Any]:
+            return v, basis
+
+        next_v_carry, new_basis = backend.cond(
+            j < subspace_dimension - 1, update_state_fn, keep_state_fn
+        )
+
+        return (next_v_carry, new_basis, new_alphas, new_betas)
+
+    # Prepare initial state for the main scan
+    v0 = initial_vector / backend.norm(initial_vector)
+
+    init_basis = backend.zeros((state_size, subspace_dimension), dtype=dtypestr)
+    init_alphas = backend.zeros((subspace_dimension,), dtype=rdtypestr)
+    init_betas = backend.zeros((subspace_dimension,), dtype=rdtypestr)
+
+    one_hot_0 = backend.onehot(0, subspace_dimension)
+    one_hot_0 = backend.cast(one_hot_0, dtype=dtypestr)
+    init_basis = init_basis + backend.reshape(v0, [-1, 1]) * backend.reshape(
+        one_hot_0, [1, subspace_dimension]
+    )
+
+    init_carry = (v0, init_basis, init_alphas, init_betas)
+
+    # Run the main scan
+    final_carry = backend.scan(
+        lanczos_step, backend.arange(subspace_dimension), init_carry
+    )
+    basis_matrix, alphas_tensor, betas_tensor = (
+        final_carry[1],
+        final_carry[2],
+        final_carry[3],
+    )
+
+    betas_off_diag = betas_tensor[:-1]
+
+    diag_part = backend.diagflat(alphas_tensor)
+    if backend.shape_tuple(betas_off_diag)[0] > 0:
+        off_diag_part = backend.diagflat(betas_off_diag, k=1)
+        projected_hamiltonian = (
+            diag_part + off_diag_part + backend.conj(backend.transpose(off_diag_part))
+        )
+    else:
+        projected_hamiltonian = diag_part
+
+    return basis_matrix, projected_hamiltonian
+
+
 def lanczos_iteration(
     hamiltonian: Tensor, initial_vector: Tensor, subspace_dimension: int
 ) -> Tuple[Tensor, Tensor]:
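As a reference point, a plain-NumPy sketch (illustrative only, not the package code) of what this scan-based routine computes: an orthonormal Krylov basis V and the tridiagonal projection T = V† H V, with the alphas on the diagonal and the betas on the first off-diagonals.

    import numpy as np

    def lanczos_reference(h, v0, m):
        n = v0.shape[0]
        basis = np.zeros((n, m), dtype=complex)
        alphas = np.zeros(m)
        betas = np.zeros(max(m - 1, 0))
        v = v0 / np.linalg.norm(v0)
        basis[:, 0] = v
        for j in range(m):
            w = h @ v
            alphas[j] = np.real(np.vdot(v, w))
            w = w - alphas[j] * v
            # full re-orthogonalization against v_0 ... v_j, as in the scan version
            for k in range(j + 1):
                w = w - np.vdot(basis[:, k], w) * basis[:, k]
            if j < m - 1:
                betas[j] = np.linalg.norm(w)
                v = w / (betas[j] + 1e-15)
                basis[:, j + 1] = v
        t = np.diag(alphas) + np.diag(betas, 1) + np.diag(betas, -1)
        return basis, t

    h = np.diag([0.0, 1.0, 2.0, 3.0]).astype(complex)
    basis, t = lanczos_reference(h, np.ones(4, dtype=complex), 3)
    print(np.allclose(basis.conj().T @ h @ basis, t, atol=1e-8))  # True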
@@ -113,9 +264,10 @@ def lanczos_iteration
 def krylov_evol(
     hamiltonian: Tensor,
     initial_state: Tensor,
-
+    times: Tensor,
     subspace_dimension: int,
     callback: Optional[Callable[[Any], Any]] = None,
+    scan_impl: bool = False,
 ) -> Any:
     """
     Perform quantum state time evolution using Krylov subspace method.
@@ -124,21 +276,30 @@ def krylov_evol
     :type hamiltonian: Tensor
     :param initial_state: Initial quantum state
     :type initial_state: Tensor
-    :param
-    :type
+    :param times: List of time points
+    :type times: Tensor
     :param subspace_dimension: Krylov subspace dimension
     :type subspace_dimension: int
     :param callback: Optional callback function applied to quantum state at
         each evolution time point, return some observables
     :type callback: Optional[Callable[[Any], Any]], optional
+    :param scan_impl: whether use scan implementation, suitable for jit but may be slow on numpy
+        defaults False, True not work for tensorflow backend + jit, due to stupid issue of tensorflow
+        context separation and the notorious inaccesibletensor error
+    :type scan_impl: bool, optional
     :return: List of evolved quantum states, or list of callback function results
         (if callback provided)
     :rtype: Any
     """
     # TODO(@refraction-ray): stable and efficient AD is to be investigated
-    basis_matrix, projected_hamiltonian = lanczos_iteration(
-        hamiltonian, initial_state, subspace_dimension
-    )
+    if not scan_impl:
+        basis_matrix, projected_hamiltonian = lanczos_iteration(
+            hamiltonian, initial_state, subspace_dimension
+        )
+    else:
+        basis_matrix, projected_hamiltonian = lanczos_iteration_scan(
+            hamiltonian, initial_state, subspace_dimension
+        )
     initial_state = backend.cast(initial_state, dtypestr)
     # Project initial state to Krylov subspace: |psi_proj> = V_m^† |psi(0)>
     projected_state = backend.matvec(
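A hedged usage sketch of the new scan_impl switch, mirroring the test added in this release (jax backend assumed so the scan path can be jitted):

    import tensorcircuit as tc

    tc.set_backend("jax")
    g = tc.templates.graphs.Line1D(4, pbc=False)
    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=True)

    c = tc.Circuit(4)
    c.x([1, 2])
    psi0 = c.state()
    times = tc.backend.convert_to_tensor([0.0, 0.5])

    # scan_impl=True routes through lanczos_iteration_scan, the jit-friendly path
    states = tc.timeevol.krylov_evol(h, psi0, times, subspace_dimension=8, scan_impl=True)
    print(states.shape)  # (2, 16)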
@@ -148,8 +309,9 @@ def krylov_evol
     # Perform spectral decomposition of projected Hamiltonian: T_m = U D U^†
     eigenvalues, eigenvectors = backend.eigh(projected_hamiltonian)
     eigenvalues = backend.cast(eigenvalues, dtypestr)
-
-
+    eigenvectors = backend.cast(eigenvectors, dtypestr)
+    times = backend.convert_to_tensor(times)
+    times = backend.cast(times, dtypestr)

     # Transform projected state to eigenbasis: |psi_coeff> = U^† |psi_proj>
     eigenvectors_projected_state = backend.matvec(
@@ -158,7 +320,7 @@ def krylov_evol

     # Calculate exp(-i*projected_H*t) * projected_state
     results = []
-    for t in
+    for t in times:
         # Calculate exp(-i*eigenvalues*t)
         exp_diagonal = backend.exp(-1j * eigenvalues * t)
@@ -182,22 +344,26 @@ def krylov_evol
     return backend.stack(results)


+@partial(
+    arg_alias,
+    alias_dict={"h": ["hamiltonian"], "psi0": ["initial_state"], "tlist": ["times"]},
+)
 def hamiltonian_evol(
-    tlist: Tensor,
     h: Tensor,
     psi0: Tensor,
+    tlist: Tensor,
     callback: Optional[Callable[..., Any]] = None,
 ) -> Tensor:
     """
     Fast implementation of time independent Hamiltonian evolution using eigendecomposition.
     By default, performs imaginary time evolution.

-    :param tlist: Time points for evolution
-    :type tlist: Tensor
     :param h: Time-independent Hamiltonian matrix
     :type h: Tensor
     :param psi0: Initial state vector
     :type psi0: Tensor
+    :param tlist: Time points for evolution
+    :type tlist: Tensor
     :param callback: Optional function to process state at each time point
     :type callback: Optional[Callable[..., Any]], optional
     :return: Evolution results at each time point. If callback is None, returns state vectors;
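A hedged usage sketch of the reordered signature (h, psi0, tlist); it is assumed here that the arg_alias decorator accepts the alias keywords listed in alias_dict (hamiltonian, initial_state, times) and forwards them to the positional names.

    import numpy as np
    import tensorcircuit as tc

    h = tc.backend.convert_to_tensor(np.diag([1.0, -1.0]))
    psi0 = tc.backend.convert_to_tensor(np.array([1.0, 1.0]) / np.sqrt(2))
    tlist = tc.backend.convert_to_tensor([0.0, 0.5, 1.0])

    states = tc.timeevol.hamiltonian_evol(h, psi0, tlist)  # new positional order
    # assumed equivalent call via the declared aliases:
    states = tc.timeevol.hamiltonian_evol(hamiltonian=h, initial_state=psi0, times=tlist)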
@@ -233,9 +399,10 @@ def hamiltonian_evol
     psi0 = backend.cast(psi0, dtypestr)
     es, u = backend.eigh(h)
     u = backend.cast(u, dtypestr)
-    utpsi0 = backend.
-        backend.transpose(u) @ backend.reshape(psi0, [-1, 1])
-    )
+    utpsi0 = backend.convert_to_tensor(
+        backend.transpose(u) @ backend.reshape(psi0, [-1, 1])
+    )  # in case np.matrix...
+    utpsi0 = backend.reshape(utpsi0, [-1])
     es = backend.cast(es, dtypestr)
     tlist = backend.cast(backend.convert_to_tensor(tlist), dtypestr)
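For context on the `# in case np.matrix...` comment, a small NumPy illustration (the scipy-todense origin of such matrices is an assumption, not stated in the diff): matrix products involving np.matrix stay 2-D even after reshape(-1), and converting back to a plain array/tensor avoids that.

    import numpy as np

    u = np.matrix(np.eye(2))                     # e.g. what a .todense() call can produce
    psi0 = np.array([1.0, 0.0]).reshape([-1, 1])
    out = u.T @ psi0
    print(type(out), out.reshape([-1]).shape)    # np.matrix stays 2-D: (1, 2)
    print(np.asarray(out).reshape([-1]).shape)   # plain ndarray: (2,)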
@@ -255,6 +422,7 @@ def hamiltonian_evol
 ed_evol = hamiltonian_evol


+@partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
 def evol_local(
     c: Circuit,
     index: Sequence[int],
@@ -279,16 +447,58 @@ def evol_local
     :return: _description_
     :rtype: Circuit
     """
+    s = c.state()
+    n = int(np.log2(s.shape[-1]) + 1e-7)
+    if isinstance(t, float):
+        t = backend.stack([0.0, t])
+    s1 = ode_evol_local(h_fun, s, t, index, None, *args, **solver_kws)
+    return type(c)(n, inputs=s1[-1])
+
+
+def ode_evol_local(
+    hamiltonian: Callable[..., Tensor],
+    initial_state: Tensor,
+    times: Tensor,
+    index: Sequence[int],
+    callback: Optional[Callable[..., Tensor]] = None,
+    *args: Any,
+    **solver_kws: Any,
+) -> Tensor:
+    """
+    ODE-based time evolution for a time-dependent Hamiltonian acting on a subsystem of qubits.
+
+    This function solves the time-dependent Schrodinger equation using numerical ODE integration.
+    The Hamiltonian is applied only to a specific subset of qubits (indices) in the system.
+
+    Note: This function currently only supports the JAX backend.
+
+    :param hamiltonian: A function that returns a dense Hamiltonian matrix for the specified
+        subsystem size. The function signature should be hamiltonian(time, *args) -> Tensor.
+    :type hamiltonian: Callable[..., Tensor]
+    :param initial_state: The initial quantum state vector of the full system.
+    :type initial_state: Tensor
+    :param times: Time points for which to compute the evolution. Should be a 1D array of times.
+    :type times: Tensor
+    :param index: Indices of qubits where the Hamiltonian is applied.
+    :type index: Sequence[int]
+    :param callback: Optional function to apply to the state at each time step.
+    :type callback: Optional[Callable[..., Tensor]]
+    :param args: Additional arguments to pass to the Hamiltonian function.
+    :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+    :return: Evolved quantum states at the specified time points. If callback is provided,
+        returns the callback results; otherwise returns the state vectors.
+    :rtype: Tensor
+    """
     from jax.experimental.ode import odeint

-    s =
-    n =
+    s = initial_state
+    n = int(np.log2(backend.shape_tuple(initial_state)[-1]) + 1e-7)
     l = len(index)

     def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
         y = backend.reshape2(y)
         y = Gate(y)
-        h = -1.0j *
+        h = -1.0j * hamiltonian(t, *args)
         h = backend.reshape2(h)
         h = Gate(h)
         edges = []
@@ -302,15 +512,15 @@ def evol_local
         y = contractor([y, h], output_edge_order=edges)
         return backend.reshape(y.tensor, [-1])

-    ts = backend.
+    ts = backend.convert_to_tensor(times)
     ts = backend.cast(ts, dtype=rdtypestr)
     s1 = odeint(f, s, ts, *args, **solver_kws)
-
-
-
-ode_evol_local = evol_local
+    if not callback:
+        return s1
+    return backend.stack([callback(s1[i]) for i in range(len(s1))])


+@partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
 def evol_global(
     c: Circuit, h_fun: Callable[..., Tensor], t: float, *args: Any, **solver_kws: Any
 ) -> Circuit:
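A hedged usage sketch of the new ode_evol_local entry point, adapted from the added test (jax backend assumed, since the routine imports jax.experimental.ode.odeint); the trailing positional 1.0 is forwarded to the Hamiltonian function as omega.

    import tensorcircuit as tc

    tc.set_backend("jax")

    def local_hamiltonian(t, omega):
        # single-qubit drive applied to the targeted qubit
        return omega * tc.backend.cos(2.0 * t) * tc.gates.x().tensor

    c = tc.Circuit(3)
    c.h(0)
    psi0 = c.state()
    times = tc.backend.arange(0.0, 1.0, 0.1)

    states = tc.timeevol.ode_evol_local(local_hamiltonian, psi0, times, [1], None, 1.0)
    print(tc.backend.shape_tuple(states))  # (10, 8)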
@@ -328,19 +538,57 @@ def evol_global
     :return: _description_
     :rtype: Circuit
     """
-    from jax.experimental.ode import odeint
-
     s = c.state()
     n = c._nqubits
+    if isinstance(t, float):
+        t = backend.stack([0.0, t])
+    s1 = ode_evol_global(h_fun, s, t, None, *args, **solver_kws)
+    return type(c)(n, inputs=s1[-1])

-    def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
-        h = -1.0j * h_fun(t, *args)
-        return backend.sparse_dense_matmul(h, y)

-
+def ode_evol_global(
+    hamiltonian: Callable[..., Tensor],
+    initial_state: Tensor,
+    times: Tensor,
+    callback: Optional[Callable[..., Tensor]] = None,
+    *args: Any,
+    **solver_kws: Any,
+) -> Tensor:
+    """
+    ODE-based time evolution for a time-dependent Hamiltonian acting on the entire system.
+
+    This function solves the time-dependent Schrodinger equation using numerical ODE integration.
+    The Hamiltonian is applied to the full system and should be provided in sparse matrix format
+    for efficiency.
+
+    Note: This function currently only supports the JAX backend.
+
+    :param hamiltonian: A function that returns a sparse Hamiltonian matrix for the full system.
+        The function signature should be hamiltonian(time, *args) -> Tensor.
+    :type hamiltonian: Callable[..., Tensor]
+    :param initial_state: The initial quantum state vector.
+    :type initial_state: Tensor
+    :param times: Time points for which to compute the evolution. Should be a 1D array of times.
+    :type times: Tensor
+    :param callback: Optional function to apply to the state at each time step.
+    :type callback: Optional[Callable[..., Tensor]]
+    :param args: Additional arguments to pass to the Hamiltonian function.
+    :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+    :return: Evolved quantum states at the specified time points. If callback is provided,
+        returns the callback results; otherwise returns the state vectors.
+    :rtype: Tensor
+    """
+    from jax.experimental.ode import odeint
+
+    s = initial_state
+    ts = backend.convert_to_tensor(times)
     ts = backend.cast(ts, dtype=rdtypestr)
-    s1 = odeint(f, s, ts, *args, **solver_kws)
-    return type(c)(n, inputs=s1[-1])

+    def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
+        h = -1.0j * hamiltonian(t, *args)
+        return backend.sparse_dense_matmul(h, y)

-
+    s1 = odeint(f, s, ts, *args, **solver_kws)
+    if not callback:
+        return s1
+    return backend.stack([callback(s1[i]) for i in range(len(s1))])
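A hedged usage sketch of ode_evol_global, adapted from the added test (jax backend assumed; the Hamiltonian function returns a sparse matrix built with PauliStringSum2COO):

    import tensorcircuit as tc

    tc.set_backend("jax")

    zz = tc.quantum.PauliStringSum2COO([[3, 3, 0, 0], [0, 3, 3, 0], [0, 0, 3, 3]], [1, 1, 1])
    xs = tc.quantum.PauliStringSum2COO(
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], [1, 1, 1, 1]
    )

    def h_fun(t):
        # time-dependent transverse-field model in sparse form
        return (1.0 + 0.5 * tc.backend.sin(2.0 * t)) * zz + 0.5 * tc.backend.cos(1.5 * t) * xs

    c = tc.Circuit(4)
    c.x([1, 3])
    psi0 = c.state()
    times = tc.backend.arange(0, 5, 0.5)

    def z0(state):
        return tc.backend.real(tc.Circuit(4, inputs=state).expectation_ps(z=[0]))

    obs = tc.timeevol.ode_evol_global(h_fun, psi0, times, z0)
    print(tc.backend.shape_tuple(obs))  # (10,)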
{tensorcircuit_nightly-1.3.0.dev20250729.dist-info → tensorcircuit_nightly-1.3.0.dev20250730.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-tensorcircuit/__init__.py,sha256=
+tensorcircuit/__init__.py,sha256=DrH3d7oEWpI2MmrSmabmwoovMMYBNoLZeCk6SFC360M,2055
 tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
 tensorcircuit/abstractcircuit.py,sha256=0osacPqq7B1EJki-cI1aLYoVRmjFaG9q3XevWMs7SsA,44125
 tensorcircuit/asciiart.py,sha256=neY1OWFwtoW5cHPNwkQHgRPktDniQvdlP9QKHkk52fM,8236
@@ -18,7 +18,7 @@ tensorcircuit/quantum.py,sha256=LNkIv5cJ2KG6puC18zTuXi-5cojW1Tnz-N-WjZ0Qu5Q,9021
 tensorcircuit/shadows.py,sha256=6XmWNubbuaxFNvZVWu-RXd0lN9Jkk-xwong_K8o8_KE,17014
 tensorcircuit/simplify.py,sha256=O11G3UYiVAc30GOfwXXmhLXwGZrQ8OVwLTMQMZp_XBc,9414
 tensorcircuit/stabilizercircuit.py,sha256=yNqcEKtYzRYrgqGil8QEyKN4OEMp9g6uOG2zuRaU8uc,15465
-tensorcircuit/timeevol.py,sha256=
+tensorcircuit/timeevol.py,sha256=8p4C3nhUQ9eC2wYfZ9w5BGFIt25NPEJTXmZU_iZy4tM,21607
 tensorcircuit/torchnn.py,sha256=z_QpM0QC3mydGyWpyp877j-tSFCPyzynCwqrTWaw-IA,4637
 tensorcircuit/translation.py,sha256=VnU7DnYmbk1cWjqa7N68WNLNDn3DwENrMzmbG4_CQco,28611
 tensorcircuit/utils.py,sha256=nEDR1wTh1WF_yV6UyZYlifqOPWdKk_Krr4HjhrWHnGQ,7228
@@ -40,15 +40,15 @@ tensorcircuit/applications/physics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQe
 tensorcircuit/applications/physics/baseline.py,sha256=RWrzMGnC0PtmpYSFkvCE7r1llR88gncXuCakAAhFE-w,1775
 tensorcircuit/applications/physics/fss.py,sha256=ny3U9ZDmT459PXjA1oUGfarBOlSKSy6fs04vD9s1XH4,3633
 tensorcircuit/backends/__init__.py,sha256=WiUmbUFzM29w3hKfhuKxVUk3PpqDFiXf4za9g0ctpZA,80
-tensorcircuit/backends/abstract_backend.py,sha256=
+tensorcircuit/backends/abstract_backend.py,sha256=pmuf8NFmtRXnlBTsRrv2s6weCNiq3cd9gA7wiKvIobM,59589
 tensorcircuit/backends/backend_factory.py,sha256=Z0aQ-RnxOnQzp-SRw8sefAH8XyBSlj2NXZwOlHinbfY,1713
 tensorcircuit/backends/cupy_backend.py,sha256=4vgO3lnQnsvWL5hukhskjJp37EAHqio6z6TVXTQcdjs,15077
 tensorcircuit/backends/jax_backend.py,sha256=dkDQ380CJHIdlt1fZvlN_g8DIowWPEcTTV_XBcs0YB0,26088
 tensorcircuit/backends/jax_ops.py,sha256=o7tLlQMRnaKWcr5rVnOMqwG6KZVpR8M8ryNQ-ceXVxs,4789
-tensorcircuit/backends/numpy_backend.py,sha256=
+tensorcircuit/backends/numpy_backend.py,sha256=PhbpXeATQ6X4kZ3xA-RvQVO_dKrpFh5vyXcCKKDMC7U,14197
 tensorcircuit/backends/pytorch_backend.py,sha256=yhfZSrm99yNW-dmijk8t6zAkbVgLRd4b_aIWKrpT7bY,24230
 tensorcircuit/backends/pytorch_ops.py,sha256=lLxpK6OqfpVwifyFlgsqhpnt-oIn4R5paPMVg51WaW0,3826
-tensorcircuit/backends/tensorflow_backend.py,sha256=
+tensorcircuit/backends/tensorflow_backend.py,sha256=T2BmFxOyl2QU4dSIwUMCLyPspLrNFLx3hVfD7TD1No0,36598
 tensorcircuit/backends/tf_ops.py,sha256=FJwDU7LhZrt0VUIx12DJU0gZnWhMv7B7r9sAKG710As,3378
 tensorcircuit/cloud/__init__.py,sha256=n0Lx07GYF6YbdIa6AJCLJk4zlAm5CqaeHszvkxxuoI4,139
 tensorcircuit/cloud/abstraction.py,sha256=6aSxbz0MP21jBVdFbSMrvJPLQH117vGz9sSHbMFoodE,14582
@@ -86,7 +86,7 @@ tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA
 tensorcircuit/templates/hamiltonians.py,sha256=Ag8djD6lckTeU7I99gCbXiQAb2VYqzm_p7-hpXo-5u4,5554
 tensorcircuit/templates/lattice.py,sha256=F35ebANk0DSmSHLR0-Q_hUbcznyCmZjb4fKmvCMywmA,58575
 tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
-tensorcircuit_nightly-1.3.0.
+tensorcircuit_nightly-1.3.0.dev20250730.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/conftest.py,sha256=J9nHlLE3Zspz1rMyzadEuBWhaS5I4Q9sq0lnWybcdIA,1457
 tests/test_backends.py,sha256=rClxb2gyAoGeXd_ZYVSAJ0zEvJ7z_2btAeFM_Iy_wwY,33925
@@ -115,10 +115,10 @@ tests/test_shadows.py,sha256=1T3kJesVJ5XfZrSncL80xdq-taGCSnTDF3eL15UlavY,5160
 tests/test_simplify.py,sha256=35tbOu1QANsPvY1buLwNhqPnMkBOsnBtHn82qaukmgI,1175
 tests/test_stabilizer.py,sha256=MivuZ5pY7GOcEPTanhtrflXostyLBToHyjfPqCU0tG0,5450
 tests/test_templates.py,sha256=Xm9otFFaaBWG9TZpgJ-nNh9MBfRipTzFWL8fBOnie2k,7192
-tests/test_timeevol.py,sha256=
+tests/test_timeevol.py,sha256=N2x5BjmOwDStQ1sd0mkdENt7Y_MNES6_3JyMyOF43Iw,14780
 tests/test_torchnn.py,sha256=CHLTfWkF7Ses5_XnGFN_uv_JddfgenFEFzaDtSH8XYU,2848
 tests/test_van.py,sha256=kAWz860ivlb5zAJuYpzuBe27qccT-Yf0jatf5uXtTo4,3163
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
+tensorcircuit_nightly-1.3.0.dev20250730.dist-info/METADATA,sha256=o5DEwWjsxTEMdi4Rxhl8KHljen9REwS9D_MtlG8SFdU,34922
+tensorcircuit_nightly-1.3.0.dev20250730.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tensorcircuit_nightly-1.3.0.dev20250730.dist-info/top_level.txt,sha256=O_Iqeh2x02lasEYMI9iyPNNNtMzcpg5qvwMOkZQ7n4A,20
+tensorcircuit_nightly-1.3.0.dev20250730.dist-info/RECORD,,
tests/test_timeevol.py
CHANGED
@@ -11,7 +11,7 @@ sys.path.insert(0, modulepath)
 import tensorcircuit as tc


-def
+def test_circuit_ode_evol(jaxb):
     def h_square(t, b):
         return (tc.backend.sign(t - 1.0) + 1) / 2 * b * tc.gates.x().tensor
@@ -19,9 +19,7 @@ def test_ode_evol(jaxb):
     c.x(0)
     c.cx(0, 1)
     c.h(2)
-    c = tc.timeevol.
-        c, [1], h_square, 2.0, tc.backend.convert_to_tensor(0.2)
-    )
+    c = tc.timeevol.evol_local(c, [1], h_square, 2.0, tc.backend.convert_to_tensor(0.2))
     c.rx(1, theta=np.pi - 0.4)
     np.testing.assert_allclose(c.expectation_ps(z=[1]), 1.0, atol=1e-5)
@@ -34,13 +32,156 @@ def test_ode_evol(jaxb):
     c.x(0)
     c.cx(0, 1)
     c.h(2)
-    c = tc.timeevol.
+    c = tc.timeevol.evol_global(
         c, h_square_sparse, 2.0, tc.backend.convert_to_tensor(0.2)
     )
     c.rx(1, theta=np.pi - 0.4)
     np.testing.assert_allclose(c.expectation_ps(z=[1]), 1.0, atol=1e-5)


+def test_ode_evol_local(jaxb):
+    def local_hamiltonian(t, Omega, phi):
+        angle = phi * t
+        coeff = Omega * tc.backend.cos(2.0 * t)  # Amplitude modulation
+
+        # Single-qubit Rabi Hamiltonian (2x2 matrix)
+        hx = coeff * tc.backend.cos(angle) * tc.gates.x().tensor
+        hy = coeff * tc.backend.sin(angle) * tc.gates.y().tensor
+        return hx + hy
+
+    # Initial state: GHZ state |0000⟩ + |1111⟩
+    c = tc.Circuit(4)
+    c.h(0)
+    for i in range(3):
+        c.cnot(i, i + 1)
+    psi0 = c.state()
+
+    # Time points
+    times = tc.backend.arange(0.0, 3.0, 0.1)
+
+    # Evolve with local Hamiltonian acting on qubit 1
+    states = tc.timeevol.ode_evol_local(
+        local_hamiltonian,
+        psi0,
+        times,
+        [1],  # Apply to qubit 1
+        None,
+        1.0,
+        2.0,  # Omega=1.0, phi=2.0
+    )
+    assert tc.backend.shape_tuple(states) == (30, 16)
+
+
+def test_ode_evol_global(jaxb):
+    # Create a time-dependent transverse field Hamiltonian
+    # H(t) = -∑ᵢ Jᵢ(t) ZᵢZᵢ₊₁ - ∑ᵢ hᵢ(t) Xᵢ
+
+    # Time-dependent coefficients
+    def time_dep_J(t):
+        return 1.0 + 0.5 * tc.backend.sin(2.0 * t)
+
+    def time_dep_h(t):
+        return 0.5 * tc.backend.cos(1.5 * t)
+
+    zz_ham = tc.quantum.PauliStringSum2COO(
+        [[3, 3, 0, 0], [0, 3, 3, 0], [0, 0, 3, 3]], [1, 1, 1]
+    )
+    x_ham = tc.quantum.PauliStringSum2COO(
+        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], [1, 1, 1, 1]
+    )
+
+    # Hamiltonian construction function
+    def hamiltonian_func(t):
+        # Create time-dependent ZZ terms
+        zz_coeff = time_dep_J(t)
+
+        # Create time-dependent X terms
+        x_coeff = time_dep_h(t)
+
+        return zz_coeff * zz_ham + x_coeff * x_ham
+
+    # Initial state: |↑↓↑↓⟩
+    c = tc.Circuit(4)
+    c.x([1, 3])
+    psi0 = c.state()
+
+    # Time points for evolution
+    times = tc.backend.arange(0, 5, 0.5)
+
+    def zobs(state):
+        n = int(np.log2(state.shape[-1]))
+        c = tc.Circuit(n, inputs=state)
+        return tc.backend.real(c.expectation_ps(z=[0]))
+
+    # Perform global ODE evolution
+    states = tc.timeevol.ode_evol_global(hamiltonian_func, psi0, times, zobs)
+    assert tc.backend.shape_tuple(states) == (10,)
+
+    zz_ham = tc.quantum.PauliStringSum2COO([[3, 3, 0, 0], [0, 3, 3, 0]], [1, 1])
+    x_ham = tc.quantum.PauliStringSum2COO([[1, 0, 0, 0], [0, 1, 0, 0]], [1, 1])
+
+    # Example with parameterized Hamiltonian and optimization
+    def parametrized_hamiltonian(t, params):
+        # params = [J0, J1, h0, h1] - parameters to optimize
+        J_t = params[0] + params[1] * tc.backend.sin(2.0 * t)
+        h_t = params[2] + params[3] * tc.backend.cos(1.5 * t)
+
+        return J_t * zz_ham + h_t * x_ham
+
+    # Observable function: measure ZZ correlation
+    def zz_correlation(state):
+        n = int(np.log2(state.shape[0]))
+        circuit = tc.Circuit(n, inputs=state)
+        return circuit.expectation_ps(z=[0, 1])
+
+    @tc.backend.jit
+    @tc.backend.value_and_grad
+    def objective_function(params):
+        states = tc.timeevol.ode_evol_global(
+            parametrized_hamiltonian,
+            psi0,
+            tc.backend.convert_to_tensor([0, 1.0]),
+            None,
+            params,
+        )
+        # Measure ZZ correlation at final time
+        final_state = states[-1]
+        return tc.backend.real(zz_correlation(final_state))
+
+    print(objective_function(tc.backend.ones([4])))
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
+def test_ed_evol(backend):
+    n = 4
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=False)
+
+    # Initial Neel state: |↑↓↑↓⟩
+    c = tc.Circuit(n)
+    c.x([1, 3])  # Apply X gates to qubits 1 and 3
+    psi0 = c.state()
+
+    # Imaginary time evolution times
+    times = tc.backend.convert_to_tensor([0.0, 0.5, 1.0, 2.0])
+
+    # Evolve and get states
+    states = tc.timeevol.ed_evol(h, psi0, times)
+    print(states)
+
+    def evolve_and_measure(params):
+        # Parametrized Hamiltonian
+        h_param = tc.quantum.heisenberg_hamiltonian(
+            g, hzz=params[0], hxx=params[1], hyy=params[2], sparse=False
+        )
+        states = tc.timeevol.ed_evol(h_param, psi0, times)
+        # Measure observable on final state
+        circuit = tc.Circuit(n, inputs=states[-1])
+        return tc.backend.real(circuit.expectation_ps(z=[0]))
+
+    evolve_and_measure(tc.backend.ones([3]))
+
+
 @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
 def test_hamiltonian_evol_basic(backend):
     """Test basic functionality of hamiltonian_evol with a simple 2-qubit Hamiltonian"""
@@ -63,7 +204,7 @@ def test_hamiltonian_evol_basic(backend):
     )

     # Evolve and get states
-    states = tc.timeevol.hamiltonian_evol(
+    states = tc.timeevol.hamiltonian_evol(h, psi0, times)

     # Check output shape
     assert states.shape == (3, 4)
@@ -99,7 +240,7 @@ def test_hamiltonian_evol_with_callback(backend):
         return tc.backend.real(c.expectation_ps(z=[0]))

     # Evolve with callback
-    results = tc.timeevol.hamiltonian_evol(
+    results = tc.timeevol.hamiltonian_evol(h, psi0, times, callback)

     # Check output shape - should be scalar for each time point
     assert results.shape == (3,)
@@ -121,7 +262,7 @@ def test_hamiltonian_evol_imaginary_time(backend):
     times = tc.backend.convert_to_tensor([0.0, 10.0])

     # Evolve
-    states = tc.timeevol.hamiltonian_evol(
+    states = tc.timeevol.hamiltonian_evol(h, psi0, times)

     # Ground state is |1⟩ (eigenvalue 1.0), so after long imaginary time
     # evolution, we should approach this state
@@ -236,3 +377,78 @@ def test_krylov_evol_subspace_accuracy(backend):
     # At least verify they have the correct shape
     assert state_small.shape == (1, 2**n)
     assert state_large.shape == (1, 2**n)
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
+def test_krylov_evol_scan_impl(backend):
+    """Test krylov_evol with scan_impl=True"""
+    n = 4
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+
+    # Generate Heisenberg Hamiltonian
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=True)
+
+    c = tc.Circuit(n)
+    c.x([1, 2])
+    psi0 = c.state()
+
+    # Evolution times
+    times = tc.backend.convert_to_tensor([0.0, 0.5])
+
+    # Perform Krylov evolution with scan implementation
+    states_scan = tc.timeevol.krylov_evol(
+        h, psi0, times, subspace_dimension=8, scan_impl=True
+    )
+
+    states_scan_dense = tc.timeevol.krylov_evol(
+        tc.backend.to_dense(h), psi0, times, subspace_dimension=8, scan_impl=True
+    )
+
+    # Perform Krylov evolution with regular implementation
+    states_regular = tc.timeevol.krylov_evol(
+        h, psi0, times, subspace_dimension=8, scan_impl=False
+    )
+
+    # Check output shapes
+    assert states_scan.shape == (2, 2**n)
+    assert states_regular.shape == (2, 2**n)
+
+    # Results should be the same (up to numerical precision)
+    np.testing.assert_allclose(states_scan, states_regular, atol=1e-5)
+    np.testing.assert_allclose(states_scan_dense, states_regular, atol=1e-5)
+
+    # All states should be normalized
+    for state in states_scan:
+        norm = tc.backend.norm(state)
+        np.testing.assert_allclose(norm, 1.0, atol=1e-5)
+
+
+@pytest.mark.parametrize("backend", [lf("jaxb")])
+def test_krylov_evol_gradient(backend):
+    """Test gradient computation with krylov_evol"""
+    n = 5
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+
+    # Generate Heisenberg Hamiltonian
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=False)
+
+    c = tc.Circuit(n)
+    c.x([1, 2])
+    psi0 = c.state()
+
+    # Evolution time
+    t = tc.backend.convert_to_tensor([1.0])
+
+    # Define a simple loss function based on the evolved state
+    def loss_function(t):
+        states = tc.timeevol.krylov_evol(
+            h, psi0, t, subspace_dimension=8, scan_impl=True
+        )
+        # Compute the sum of absolute values of the final state as a simple loss
+        return tc.backend.sum(tc.backend.abs(states[0]))
+
+    grad_fn = tc.backend.jit(tc.backend.grad(loss_function))
+    gradient = grad_fn(t)
+    print(gradient)
File without changes
File without changes