tensorcircuit-nightly 1.3.0.dev20250729__py3-none-any.whl → 1.3.0.dev20250731__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

tensorcircuit/timeevol.py CHANGED
@@ -3,14 +3,167 @@ Analog time evolution engines
  """

  from typing import Any, Tuple, Optional, Callable, List, Sequence
+ from functools import partial
+
+ import numpy as np

  from .cons import backend, dtypestr, rdtypestr, contractor
  from .gates import Gate
+ from .utils import arg_alias

  Tensor = Any
  Circuit = Any


+ def lanczos_iteration_scan(
+     hamiltonian: Any, initial_vector: Any, subspace_dimension: int
+ ) -> Tuple[Any, Any]:
+     """
+     Use Lanczos algorithm to construct orthogonal basis and projected Hamiltonian
+     of Krylov subspace, using `tc.backend.scan` for JIT compatibility.
+
+     :param hamiltonian: Sparse or dense Hamiltonian matrix
+     :type hamiltonian: Tensor
+     :param initial_vector: Initial quantum state vector
+     :type initial_vector: Tensor
+     :param subspace_dimension: Dimension of Krylov subspace
+     :type subspace_dimension: int
+     :return: Tuple containing (basis matrix, projected Hamiltonian)
+     :rtype: Tuple[Tensor, Tensor]
+     """
+     state_size = backend.shape_tuple(initial_vector)[0]
+     if backend.is_sparse(hamiltonian):
+         hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
+
+     # Main scan body for the outer loop (iterating j)
+     def lanczos_step(carry: Tuple[Any, ...], j: int) -> Tuple[Any, ...]:
+         v, basis, alphas, betas = carry
+
+         if backend.is_sparse(hamiltonian):
+             w = backend.sparse_dense_matmul(hamiltonian, v)
+         else:
+             w = backend.matvec(hamiltonian, v)
+
+         alpha = backend.real(backend.sum(backend.conj(v) * w))
+         w = w - backend.cast(alpha, dtypestr) * v
+
+         # Inner scan for re-orthogonalization (iterating k)
+         # def ortho_step(inner_carry: Tuple[Any, Any], k: int) -> Tuple[Any, Any]:
+         #     w_carry, j_val = inner_carry
+
+         #     def do_projection() -> Any:
+         #         # `basis` is available here through closure
+         #         v_k = basis[:, k]
+         #         projection = backend.sum(backend.conj(v_k) * w_carry)
+         #         return w_carry - projection * v_k
+
+         #     def do_nothing() -> Any:
+         #         return w_carry
+
+         #     # Orthogonalize against v_0, ..., v_j
+         #     w_new = backend.cond(k <= j_val, do_projection, do_nothing)
+         #     return (w_new, j_val)  # Return the new carry for the inner loop
+
+         # # Pass `j` into the inner scan's carry
+         # inner_init_carry = (w, j)
+         # final_inner_carry = backend.scan(
+         #     ortho_step, backend.arange(subspace_dimension), inner_init_carry
+         # )
+         # w_ortho = final_inner_carry[0]
+
+         def ortho_step(w_carry: Any, elems_tuple: Tuple[Any, Any]) -> Any:
+             k, j_from_elems = elems_tuple
+
+             def do_projection() -> Any:
+                 v_k = basis[:, k]
+                 projection = backend.sum(backend.conj(v_k) * w_carry)
+                 return w_carry - projection * v_k
+
+             def do_nothing() -> Any:
+                 return backend.cast(w_carry, dtype=dtypestr)
+
+             w_new = backend.cond(k <= j_from_elems, do_projection, do_nothing)
+             return w_new
+
+         k_elems = backend.arange(subspace_dimension)
+         j_elems = backend.tile(backend.reshape(j, [1]), [subspace_dimension])
+         inner_elems = (k_elems, j_elems)
+         w_ortho = backend.scan(ortho_step, inner_elems, w)
+
+         beta = backend.norm(w_ortho)
+         beta = backend.real(beta)
+
+         # Update alphas and betas arrays
+         new_alphas = backend.scatter(
+             alphas, backend.reshape(j, [1, 1]), backend.reshape(alpha, [1])
+         )
+         new_betas = backend.scatter(
+             betas, backend.reshape(j, [1, 1]), backend.reshape(beta, [1])
+         )
+
+         def update_state_fn() -> Tuple[Any, Any]:
+             epsilon = 1e-15
+             next_v = w_ortho / backend.cast(beta + epsilon, dtypestr)
+
+             one_hot_update = backend.onehot(j + 1, subspace_dimension)
+             one_hot_update = backend.cast(one_hot_update, dtype=dtypestr)
+
+             # Create a mask to update only the (j+1)-th column
+             mask = 1.0 - backend.reshape(one_hot_update, [1, subspace_dimension])
+             new_basis = basis * mask + backend.reshape(
+                 next_v, [-1, 1]
+             ) * backend.reshape(one_hot_update, [1, subspace_dimension])
+
+             return next_v, new_basis
+
+         def keep_state_fn() -> Tuple[Any, Any]:
+             return v, basis
+
+         next_v_carry, new_basis = backend.cond(
+             j < subspace_dimension - 1, update_state_fn, keep_state_fn
+         )
+
+         return (next_v_carry, new_basis, new_alphas, new_betas)
+
+     # Prepare initial state for the main scan
+     v0 = initial_vector / backend.norm(initial_vector)
+
+     init_basis = backend.zeros((state_size, subspace_dimension), dtype=dtypestr)
+     init_alphas = backend.zeros((subspace_dimension,), dtype=rdtypestr)
+     init_betas = backend.zeros((subspace_dimension,), dtype=rdtypestr)
+
+     one_hot_0 = backend.onehot(0, subspace_dimension)
+     one_hot_0 = backend.cast(one_hot_0, dtype=dtypestr)
+     init_basis = init_basis + backend.reshape(v0, [-1, 1]) * backend.reshape(
+         one_hot_0, [1, subspace_dimension]
+     )
+
+     init_carry = (v0, init_basis, init_alphas, init_betas)
+
+     # Run the main scan
+     final_carry = backend.scan(
+         lanczos_step, backend.arange(subspace_dimension), init_carry
+     )
+     basis_matrix, alphas_tensor, betas_tensor = (
+         final_carry[1],
+         final_carry[2],
+         final_carry[3],
+     )
+
+     betas_off_diag = betas_tensor[:-1]
+
+     diag_part = backend.diagflat(alphas_tensor)
+     if backend.shape_tuple(betas_off_diag)[0] > 0:
+         off_diag_part = backend.diagflat(betas_off_diag, k=1)
+         projected_hamiltonian = (
+             diag_part + off_diag_part + backend.conj(backend.transpose(off_diag_part))
+         )
+     else:
+         projected_hamiltonian = diag_part
+
+     return basis_matrix, projected_hamiltonian
+
+
  def lanczos_iteration(
      hamiltonian: Tensor, initial_vector: Tensor, subspace_dimension: int
  ) -> Tuple[Tensor, Tensor]:
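Editor's note on the new scan-based Lanczos routine above: a minimal usage sketch, not part of the diff. It assumes the function is importable from tensorcircuit.timeevol and that the jax backend is active; the Krylov approximation is checked against scipy.linalg.expm.

    # Hypothetical sketch: approximate exp(-i H t)|psi0> from the Krylov basis V and
    # tridiagonal projection T returned by lanczos_iteration_scan.
    import numpy as np
    from scipy.linalg import expm
    import tensorcircuit as tc
    from tensorcircuit.timeevol import lanczos_iteration_scan  # assumed import path

    tc.set_backend("jax")
    tc.set_dtype("complex128")

    n = 6
    h = np.random.normal(size=[2**n, 2**n])
    h = ((h + h.T) / 2).astype(np.complex128)  # dense Hermitian test Hamiltonian
    psi0 = np.random.normal(size=[2**n]).astype(np.complex128)
    psi0 /= np.linalg.norm(psi0)

    V, T = lanczos_iteration_scan(h, tc.backend.convert_to_tensor(psi0), 20)
    V, T = np.asarray(V), np.asarray(T)

    t = 0.5
    # Krylov-subspace approximation: V exp(-i T t) V^dagger |psi0>
    psi_t = V @ expm(-1j * T * t) @ (V.conj().T @ psi0)
    exact = expm(-1j * h * t) @ psi0
    print(np.linalg.norm(psi_t - exact))  # should be small for enough Krylov vectors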
@@ -45,6 +198,9 @@ def lanczos_iteration(
      # Add first basis vector
      basis_vectors.append(vector)

+     if backend.is_sparse(hamiltonian):
+         hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
+
      # Lanczos iteration (fixed number of iterations for JIT compatibility)
      for j in range(subspace_dimension):
          # Calculate H|v_j>
@@ -113,9 +269,10 @@ def lanczos_iteration(
  def krylov_evol(
      hamiltonian: Tensor,
      initial_state: Tensor,
-     time_points: Tensor,
+     times: Tensor,
      subspace_dimension: int,
      callback: Optional[Callable[[Any], Any]] = None,
+     scan_impl: bool = False,
  ) -> Any:
      """
      Perform quantum state time evolution using Krylov subspace method.
@@ -124,21 +281,30 @@ def krylov_evol(
      :type hamiltonian: Tensor
      :param initial_state: Initial quantum state
      :type initial_state: Tensor
-     :param time_points: List of time points
-     :type time_points: Tensor
+     :param times: List of time points
+     :type times: Tensor
      :param subspace_dimension: Krylov subspace dimension
      :type subspace_dimension: int
      :param callback: Optional callback function applied to quantum state at
          each evolution time point, return some observables
      :type callback: Optional[Callable[[Any], Any]], optional
+     :param scan_impl: whether to use the scan-based implementation; suitable for jit but may
+         be slow on the numpy backend. Defaults to False. True does not work with the tensorflow
+         backend under jit, due to tensorflow's context separation and the notorious InaccessibleTensorError.
+     :type scan_impl: bool, optional
      :return: List of evolved quantum states, or list of callback function results
          (if callback provided)
      :rtype: Any
      """
      # TODO(@refraction-ray): stable and efficient AD is to be investigated
-     basis_matrix, projected_hamiltonian = lanczos_iteration(
-         hamiltonian, initial_state, subspace_dimension
-     )
+     if not scan_impl:
+         basis_matrix, projected_hamiltonian = lanczos_iteration(
+             hamiltonian, initial_state, subspace_dimension
+         )
+     else:
+         basis_matrix, projected_hamiltonian = lanczos_iteration_scan(
+             hamiltonian, initial_state, subspace_dimension
+         )
      initial_state = backend.cast(initial_state, dtypestr)
      # Project initial state to Krylov subspace: |psi_proj> = V_m^† |psi(0)>
      projected_state = backend.matvec(
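A hedged sketch of how the new scan_impl switch is meant to be used (not part of the diff; assumes the jax backend and that krylov_evol is importable from tensorcircuit.timeevol):

    # Survival probability |<psi0|psi(t)>|^2 on a Heisenberg chain, jit-compiled end to end.
    import numpy as np
    import tensorcircuit as tc
    from tensorcircuit.timeevol import krylov_evol  # assumed import path

    tc.set_backend("jax")
    tc.set_dtype("complex128")

    n = 8
    g = tc.templates.graphs.Line1D(n)
    h = tc.quantum.heisenberg_hamiltonian(g, sparse=True)

    psi0 = np.zeros([2**n], dtype=np.complex128)
    psi0[0] = 1.0

    def survival_prob(s):
        overlap = tc.backend.sum(tc.backend.conj(tc.backend.convert_to_tensor(psi0)) * s)
        return tc.backend.real(overlap * tc.backend.conj(overlap))

    @tc.backend.jit
    def evolve(times):
        # scan_impl=True keeps the Lanczos loop traceable, so the whole call can be jitted
        return krylov_evol(h, psi0, times, subspace_dimension=30,
                           callback=survival_prob, scan_impl=True)

    print(evolve(np.linspace(0.0, 2.0, 21)))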
@@ -148,8 +314,9 @@ def krylov_evol(
      # Perform spectral decomposition of projected Hamiltonian: T_m = U D U^†
      eigenvalues, eigenvectors = backend.eigh(projected_hamiltonian)
      eigenvalues = backend.cast(eigenvalues, dtypestr)
-     time_points = backend.convert_to_tensor(time_points)
-     time_points = backend.cast(time_points, dtypestr)
+     eigenvectors = backend.cast(eigenvectors, dtypestr)
+     times = backend.convert_to_tensor(times)
+     times = backend.cast(times, dtypestr)

      # Transform projected state to eigenbasis: |psi_coeff> = U^† |psi_proj>
      eigenvectors_projected_state = backend.matvec(
@@ -158,7 +325,7 @@ def krylov_evol(


      results = []
-     for t in time_points:
+     for t in times:
          # Calculate exp(-i*eigenvalues*t)
          exp_diagonal = backend.exp(-1j * eigenvalues * t)

@@ -182,22 +349,26 @@ def krylov_evol(
      return backend.stack(results)


+ @partial(
+     arg_alias,
+     alias_dict={"h": ["hamiltonian"], "psi0": ["initial_state"], "tlist": ["times"]},
+ )
  def hamiltonian_evol(
-     tlist: Tensor,
      h: Tensor,
      psi0: Tensor,
+     tlist: Tensor,
      callback: Optional[Callable[..., Any]] = None,
  ) -> Tensor:
      """
      Fast implementation of time independent Hamiltonian evolution using eigendecomposition.
      By default, performs imaginary time evolution.

-     :param tlist: Time points for evolution
-     :type tlist: Tensor
      :param h: Time-independent Hamiltonian matrix
      :type h: Tensor
      :param psi0: Initial state vector
      :type psi0: Tensor
+     :param tlist: Time points for evolution
+     :type tlist: Tensor
      :param callback: Optional function to process state at each time point
      :type callback: Optional[Callable[..., Any]], optional
      :return: Evolution results at each time point. If callback is None, returns state vectors;
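The arg_alias decorator above appears to let the older keyword names coexist with the new, more explicit ones; a small sketch under that assumption (not part of the diff, and the alias semantics should be double-checked against tc.utils.arg_alias):

    # Hypothetical calls, assuming alias_dict maps each real parameter to its accepted aliases.
    import numpy as np
    from tensorcircuit.timeevol import hamiltonian_evol  # assumed import path

    h = np.array([[1.0, 0.2], [0.2, -1.0]], dtype=np.complex128)
    psi0 = np.array([1.0, 0.0], dtype=np.complex128)
    ts = np.linspace(0.0, 1.0, 5)

    states_a = hamiltonian_evol(h, psi0, ts)  # new positional order: (h, psi0, tlist)
    states_b = hamiltonian_evol(hamiltonian=h, initial_state=psi0, times=ts)  # aliased keywords
    assert np.allclose(np.asarray(states_a), np.asarray(states_b))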
@@ -233,9 +404,10 @@ def hamiltonian_evol(
      psi0 = backend.cast(psi0, dtypestr)
      es, u = backend.eigh(h)
      u = backend.cast(u, dtypestr)
-     utpsi0 = backend.reshape(
-         backend.transpose(u) @ backend.reshape(psi0, [-1, 1]), [-1]
-     )
+     utpsi0 = backend.convert_to_tensor(
+         backend.transpose(u) @ backend.reshape(psi0, [-1, 1])
+     )  # in case np.matrix...
+     utpsi0 = backend.reshape(utpsi0, [-1])
      es = backend.cast(es, dtypestr)
      tlist = backend.cast(backend.convert_to_tensor(tlist), dtypestr)

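Why the convert_to_tensor round-trip above matters — a small numpy-only illustration of the np.matrix corner case mentioned in the inline comment (not part of the diff):

    import numpy as np

    m = np.matrix([[1.0, 0.0], [0.0, 1.0]])   # e.g. what scipy sparse .todense() returns
    v = np.ones([2, 1])

    print((m @ v).reshape(-1).shape)            # (1, 2): np.matrix stays 2-D after reshape
    print(np.asarray(m @ v).reshape(-1).shape)  # (2,): converting first yields a flat vector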
@@ -255,6 +427,7 @@ def hamiltonian_evol(
  ed_evol = hamiltonian_evol


+ @partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
  def evol_local(
      c: Circuit,
      index: Sequence[int],
@@ -279,16 +452,58 @@ def evol_local(
      :return: _description_
      :rtype: Circuit
      """
+     s = c.state()
+     n = int(np.log2(s.shape[-1]) + 1e-7)
+     if isinstance(t, float):
+         t = backend.stack([0.0, t])
+     s1 = ode_evol_local(h_fun, s, t, index, None, *args, **solver_kws)
+     return type(c)(n, inputs=s1[-1])
+
+
+ def ode_evol_local(
+     hamiltonian: Callable[..., Tensor],
+     initial_state: Tensor,
+     times: Tensor,
+     index: Sequence[int],
+     callback: Optional[Callable[..., Tensor]] = None,
+     *args: Any,
+     **solver_kws: Any,
+ ) -> Tensor:
+     """
+     ODE-based time evolution for a time-dependent Hamiltonian acting on a subsystem of qubits.
+
+     This function solves the time-dependent Schrodinger equation using numerical ODE integration.
+     The Hamiltonian is applied only to a specific subset of qubits (indices) in the system.
+
+     Note: This function currently only supports the JAX backend.
+
+     :param hamiltonian: A function that returns a dense Hamiltonian matrix for the specified
+         subsystem size. The function signature should be hamiltonian(time, *args) -> Tensor.
+     :type hamiltonian: Callable[..., Tensor]
+     :param initial_state: The initial quantum state vector of the full system.
+     :type initial_state: Tensor
+     :param times: Time points for which to compute the evolution. Should be a 1D array of times.
+     :type times: Tensor
+     :param index: Indices of qubits where the Hamiltonian is applied.
+     :type index: Sequence[int]
+     :param callback: Optional function to apply to the state at each time step.
+     :type callback: Optional[Callable[..., Tensor]]
+     :param args: Additional arguments to pass to the Hamiltonian function.
+     :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+     :return: Evolved quantum states at the specified time points. If callback is provided,
+         returns the callback results; otherwise returns the state vectors.
+     :rtype: Tensor
+     """
      from jax.experimental.ode import odeint

-     s = c.state()
-     n = c._nqubits
+     s = initial_state
+     n = int(np.log2(backend.shape_tuple(initial_state)[-1]) + 1e-7)
      l = len(index)

      def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
          y = backend.reshape2(y)
          y = Gate(y)
-         h = -1.0j * h_fun(t, *args)
+         h = -1.0j * hamiltonian(t, *args)
          h = backend.reshape2(h)
          h = Gate(h)
          edges = []
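A hedged usage sketch for ode_evol_local defined above (not part of the diff; jax backend per the docstring note, import path assumed):

    # Drive qubit 0 of a 4-qubit register with a time-dependent X field.
    import numpy as np
    import tensorcircuit as tc
    from tensorcircuit.timeevol import ode_evol_local  # assumed import path

    tc.set_backend("jax")
    tc.set_dtype("complex128")

    n = 4
    psi0 = np.zeros([2**n], dtype=np.complex128)
    psi0[0] = 1.0

    def hx(t):
        # 2x2 Hamiltonian acting only on the qubits listed in `index`
        return tc.backend.cast(tc.backend.cos(t), "complex128") * tc.gates._x_matrix

    times = np.linspace(0.0, 2.0, 11)
    states = ode_evol_local(hx, tc.backend.convert_to_tensor(psi0), times, [0])
    print(states.shape)  # (11, 2**n): one state per requested time point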
@@ -302,15 +517,15 @@ def evol_local(
          y = contractor([y, h], output_edge_order=edges)
          return backend.reshape(y.tensor, [-1])

-     ts = backend.stack([0.0, t])
+     ts = backend.convert_to_tensor(times)
      ts = backend.cast(ts, dtype=rdtypestr)
      s1 = odeint(f, s, ts, *args, **solver_kws)
-     return type(c)(n, inputs=s1[-1])
-
-
- ode_evol_local = evol_local
+     if not callback:
+         return s1
+     return backend.stack([callback(s1[i]) for i in range(len(s1))])


+ @partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
  def evol_global(
      c: Circuit, h_fun: Callable[..., Tensor], t: float, *args: Any, **solver_kws: Any
  ) -> Circuit:
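The circuit-level wrappers now just prepare the state and time grid before delegating to the ode_evol_* functions; a hedged sketch (not part of the diff; jax backend and real tc Hamiltonian helpers assumed, and the arg_alias decorators should also accept hamiltonian=/times= keywords):

    import numpy as np
    import tensorcircuit as tc
    from tensorcircuit.timeevol import evol_local, evol_global  # assumed import path

    tc.set_backend("jax")
    tc.set_dtype("complex128")

    n = 3
    c = tc.Circuit(n)
    c.h(0)

    def local_h(t):
        return tc.backend.cast(tc.backend.sin(t), "complex128") * tc.gates._z_matrix

    c = evol_local(c, [1], local_h, 1.0)  # float t is promoted to the grid [0.0, t]

    g = tc.templates.graphs.Line1D(n)
    hs = tc.quantum.heisenberg_hamiltonian(g, sparse=True)

    def global_h(t):
        return hs  # a time-independent sparse Hamiltonian is also fine here

    c = evol_global(c, global_h, 0.5)
    print(c.state())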
@@ -328,19 +543,269 @@ def evol_global(
      :return: _description_
      :rtype: Circuit
      """
-     from jax.experimental.ode import odeint
-
      s = c.state()
      n = c._nqubits
+     if isinstance(t, float):
+         t = backend.stack([0.0, t])
+     s1 = ode_evol_global(h_fun, s, t, None, *args, **solver_kws)
+     return type(c)(n, inputs=s1[-1])
+
+
+ def ode_evol_global(
+     hamiltonian: Callable[..., Tensor],
+     initial_state: Tensor,
+     times: Tensor,
+     callback: Optional[Callable[..., Tensor]] = None,
+     *args: Any,
+     **solver_kws: Any,
+ ) -> Tensor:
+     """
+     ODE-based time evolution for a time-dependent Hamiltonian acting on the entire system.
+
+     This function solves the time-dependent Schrodinger equation using numerical ODE integration.
+     The Hamiltonian is applied to the full system and should be provided in sparse matrix format
+     for efficiency.
+
+     Note: This function currently only supports the JAX backend.
+
+     :param hamiltonian: A function that returns a sparse Hamiltonian matrix for the full system.
+         The function signature should be hamiltonian(time, *args) -> Tensor.
+     :type hamiltonian: Callable[..., Tensor]
+     :param initial_state: The initial quantum state vector.
+     :type initial_state: Tensor
+     :param times: Time points for which to compute the evolution. Should be a 1D array of times.
+     :type times: Tensor
+     :param callback: Optional function to apply to the state at each time step.
+     :type callback: Optional[Callable[..., Tensor]]
+     :param args: Additional arguments to pass to the Hamiltonian function.
+     :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+     :return: Evolved quantum states at the specified time points. If callback is provided,
+         returns the callback results; otherwise returns the state vectors.
+     :rtype: Tensor
+     """
+     from jax.experimental.ode import odeint
+
+     s = initial_state
+     ts = backend.convert_to_tensor(times)
+     ts = backend.cast(ts, dtype=rdtypestr)

      def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
-         h = -1.0j * h_fun(t, *args)
+         h = -1.0j * hamiltonian(t, *args)
          return backend.sparse_dense_matmul(h, y)

-     ts = backend.stack([0.0, t])
-     ts = backend.cast(ts, dtype=rdtypestr)
      s1 = odeint(f, s, ts, *args, **solver_kws)
-     return type(c)(n, inputs=s1[-1])
+     if not callback:
+         return s1
+     return backend.stack([callback(s1[i]) for i in range(len(s1))])
+
+
+ def chebyshev_evol(
+     hamiltonian: Any,
+     initial_state: Tensor,
+     t: float,
+     spectral_bounds: Tuple[float, float],
+     k: int,
+     M: int,
+ ) -> Any:
+     """
+     Chebyshev evolution method by expanding the time evolution exponential operator
+     in Chebyshev series.
+     Note the returned state is not normalized, but the norm should be very close to 1 for
+     sufficiently large k and M, which can serve as an accuracy check of the final result.
+
+     :param hamiltonian: Hamiltonian matrix (sparse or dense)
+     :type hamiltonian: Any
+     :param initial_state: Initial state vector
+     :type initial_state: Tensor
+     :param t: Time to evolve
+     :type t: float
+     :param spectral_bounds: Spectral bounds for the Hamiltonian (Emax, Emin)
+     :type spectral_bounds: Tuple[float, float]
+     :param k: Number of Chebyshev coefficients, a good estimate is k > t*(Emax-Emin)/2
+     :type k: int
+     :param M: Number of iterations to estimate Bessel function, a good estimate is given
+         by the `estimate_M` helper method.
+     :type M: int
+     :return: Evolved state
+     :rtype: Tensor
+     """
+     # TODO(@refraction-ray): no support for tf backend as bessel function has no implementation
+     E_max, E_min = spectral_bounds
+     if E_max <= E_min:
+         raise ValueError("E_max must be > E_min.")
+
+     a = (E_max - E_min) / 2.0
+     b = (E_max + E_min) / 2.0
+     tau = a * t  # Rescaled time parameter
+
+     if backend.is_sparse(hamiltonian):
+         hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
+
+     def apply_h_norm(psi: Any) -> Any:
+         """Applies the normalized Hamiltonian to a state."""
+         return ((hamiltonian @ psi) - b * psi) / a
+
+     # Handle edge case where no evolution is needed.
+     if k == 0:
+         # The phase factor still applies even for zero evolution of the series part.
+         phase = backend.exp(-1j * b * t)
+         return phase * backend.zeros_like(initial_state)
+
+     # --- 2. Calculate Chebyshev Expansion Coefficients ---
+     k_indices = backend.arange(k)
+     bessel_vals = backend.special_jv(k, tau, M)
+
+     # Prefactor is 1 for k=0 and 2 for k>0.
+     prefactor = backend.ones([k])
+     if k > 1:
+         # Using concat for backend compatibility (vs. jax's .at[1:].set(2.0))
+         prefactor = backend.concat(
+             [backend.ones([1]), backend.ones([k - 1]) * 2.0], axis=0
+         )
+
+     ik_powers = backend.power(0 - 1j, k_indices)
+     coeffs = prefactor * ik_powers * bessel_vals
+
+     # --- 3. Iteratively build the result using a scan ---
+
+     # Handle the simple case of k=1 separately.
+     if k == 1:
+         psi_unphased = coeffs[0] * initial_state
+     else:  # k >= 2, use the scan operation.
+         # Initialize the first two Chebyshev vectors and the initial sum.
+         T0 = initial_state
+         T1 = apply_h_norm(T0)
+         initial_sum = coeffs[0] * T0 + coeffs[1] * T1
+
+         # The carry for the scan holds the state needed for the next iteration:
+         # (current vector T_k, previous vector T_{k-1}, and the running sum).
+         initial_carry = (T1, T0, initial_sum)
+
+         def scan_body(carry, i):  # type: ignore
+             """The body of the scan operation."""
+             Tk, Tkm1, current_sum = carry
+
+             # Calculate the next Chebyshev vector using the recurrence relation.
+             Tkp1 = 2 * apply_h_norm(Tk) - Tkm1
+
+             # Add its contribution to the running sum.
+             new_sum = current_sum + coeffs[i] * Tkp1
+
+             # Return the updated carry for the next step. No intermediate output is needed.
+             return (Tkp1, Tk, new_sum)
+
+         # Run the scan over the remaining coefficients (from index 2 to k-1).
+         final_carry = backend.scan(scan_body, backend.arange(2, k), initial_carry)
+
+         # The final result is the sum accumulated in the last carry state.
+         psi_unphased = final_carry[2]
+
+     # --- 4. Final Step: Apply Phase Correction ---
+     # This undoes the energy shift from the Hamiltonian normalization.
+     phase = backend.exp(-1j * b * t)
+     psi_final = phase * psi_unphased
+
+     return psi_final
+
+
+ def estimate_k(t: float, spectral_bounds: Tuple[float, float]) -> int:
+     """
+     Estimate k for the Chebyshev expansion.
+
+     :param t: time
+     :type t: float
+     :param spectral_bounds: spectral bounds (Emax, Emin)
+     :type spectral_bounds: Tuple[float, float]
+     :return: k
+     :rtype: int
+     """
+     E_max, E_min = spectral_bounds
+     a = (E_max - E_min) / 2.0
+     tau = a * t  # tau is now a scalar
+     return max(int(1.1 * tau), int(tau + 20))
+
+
+ def estimate_M(t: float, spectral_bounds: Tuple[float, float], k: int) -> int:
+     """
+     Estimate M for the Bessel function iterations.
+
+     :param t: time
+     :type t: float
+     :param spectral_bounds: spectral bounds (Emax, Emin)
+     :type spectral_bounds: Tuple[float, float]
+     :param k: k
+     :type k: int
+     :return: M
+     :rtype: int
+     """
+     E_max, E_min = spectral_bounds
+     a = (E_max - E_min) / 2.0
+     tau = a * t  # tau is now a scalar
+     safety_factor = 15
+     M = max(k, int(abs(tau))) + int(safety_factor * np.sqrt(abs(tau)))
+     M = max(M, k + 30)
+     return M
+
+
+ def estimate_spectral_bounds(
+     h: Any, n_iter: int = 30, psi0: Optional[Any] = None
+ ) -> Tuple[float, float]:
+     """
+     Lanczos algorithm to estimate the spectral bounds of a Hamiltonian.
+     Intended as a quick estimate before `chebyshev_evol`; not jit-able.
+
+     :param h: Hamiltonian matrix.
+     :type h: Any
+     :param n_iter: iteration number.
+     :type n_iter: int
+     :param psi0: Optional initial state.
+     :type psi0: Optional[Any]
+     :return: (E_max, E_min).
+     """
+     shape = h.shape
+     D = shape[-1]
+     if psi0 is None:
+         psi0 = np.random.normal(size=[D])
+
+     psi0 = backend.convert_to_tensor(psi0) / backend.norm(psi0)
+     psi0 = backend.cast(psi0, dtypestr)
+
+     # Lanczos
+     alphas = []
+     betas = []
+     q_prev = backend.zeros(psi0.shape, dtype=psi0.dtype)
+     q = psi0
+     beta = 0
+
+     for _ in range(n_iter):
+         r = h @ q
+         r = backend.convert_to_tensor(r)  # in case np.matrix
+         r = backend.reshape(r, [-1])
+         if beta != 0:
+             r -= backend.cast(beta, dtypestr) * q_prev
+
+         alpha = backend.real(backend.sum(backend.conj(q) * r))
+
+         alphas.append(alpha)
+
+         r -= backend.cast(alpha, dtypestr) * q
+
+         q_prev = q
+         beta = backend.norm(r)
+         q = r / beta
+         beta = backend.abs(beta)
+         betas.append(beta)
+         if beta < 1e-8:
+             break
+
+     alphas = backend.stack(alphas)
+     betas = backend.stack(betas)
+     T = (
+         backend.diagflat(alphas)
+         + backend.diagflat(betas[:-1], k=1)
+         + backend.diagflat(betas[:-1], k=-1)
+     )

+     ritz_values, _ = backend.eigh(T)

- ode_evol_global = evol_global
+     return backend.max(ritz_values), backend.min(ritz_values)
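Putting the Chebyshev pieces above together — a hedged end-to-end sketch (not part of the diff; import paths assumed, and it relies on backend.special_jv, which per the TODO above is not available on the tensorflow backend):

    import numpy as np
    import tensorcircuit as tc
    from tensorcircuit.timeevol import (  # assumed import path
        chebyshev_evol, estimate_k, estimate_M, estimate_spectral_bounds,
    )

    tc.set_backend("jax")
    tc.set_dtype("complex128")

    n = 8
    g = tc.templates.graphs.Line1D(n)
    h = tc.quantum.heisenberg_hamiltonian(g, sparse=True)

    psi0 = np.zeros([2**n], dtype=np.complex128)
    psi0[0] = 1.0
    t = 2.0

    # Rough spectral window from the quick (non-jittable) Lanczos helper.
    bounds = tuple(float(b) for b in estimate_spectral_bounds(h, n_iter=30))
    k = estimate_k(t, bounds)     # series length, roughly t * (E_max - E_min) / 2
    M = estimate_M(t, bounds, k)  # Bessel recursion depth

    psi_t = chebyshev_evol(h, tc.backend.convert_to_tensor(psi0), t, bounds, k, M)

    # The result is not normalized; a norm close to 1 signals that k and M were large enough.
    print(tc.backend.norm(psi_t))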
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tensorcircuit-nightly
- Version: 1.3.0.dev20250729
+ Version: 1.3.0.dev20250731
  Summary: nightly release for tensorcircuit
  Home-page: https://github.com/refraction-ray/tensorcircuit-dev
  Author: TensorCircuit Authors
@@ -70,7 +70,7 @@ TensorCircuit-NG is the actively maintained official version and a [fully compat

  Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit-ng.readthedocs.io/).

- For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 80+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).
+ For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 90+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).

  For beginners, please refer to [quantum computing lectures with TC-NG](https://github.com/sxzgroup/qc_lecture) to learn both quantum computing basics and representative usage of TensorCircuit-NG.