tensorcircuit-nightly 1.3.0.dev20250730__py3-none-any.whl → 1.3.0.dev20250801__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


tensorcircuit/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "1.3.0.dev20250730"
+__version__ = "1.3.0.dev20250801"
 __author__ = "TensorCircuit Authors"
 __creator__ = "refraction-ray"
 
tensorcircuit/backends/abstract_backend.py CHANGED
@@ -808,6 +808,21 @@ class ExtendedBackend:
             "Backend '{}' has not implemented `solve`.".format(self.name)
         )
 
+    def special_jv(self: Any, v: int, z: Tensor, M: int) -> Tensor:
+        """
+        Special function: Bessel function of the first kind.
+
+        :param v: The order of the Bessel function.
+        :type v: int
+        :param z: The argument of the Bessel function.
+        :type z: Tensor
+        :param M: The number of recurrence iterations used to estimate the Bessel values.
+        :type M: int
+        :return: The values of the Bessel functions [J_0(z), ..., J_{v-1}(z)].
+        :rtype: Tensor
+        """
+        raise NotImplementedError(
+            "Backend '{}' has not implemented `special_jv`.".format(self.name)
+        )
+
     def searchsorted(self: Any, a: Tensor, v: Tensor, side: str = "left") -> Tensor:
         """
         Find indices where elements should be inserted to maintain order.
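To make the contract concrete, here is a hedged usage sketch of the new method (it relies on the NumPy implementation shown further down, where `M` is accepted for API uniformity but scipy evaluates the orders directly):

```python
# Sketch: querying the first v Bessel values J_0(z) .. J_{v-1}(z) via the backend API.
import numpy as np
import tensorcircuit as tc

tc.set_backend("numpy")
vals = tc.backend.special_jv(4, 1.5, 64)  # [J_0(1.5), J_1(1.5), J_2(1.5), J_3(1.5)]
assert np.asarray(vals).shape == (4,)
```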
@@ -1251,6 +1266,26 @@ class ExtendedBackend:
             "Backend '{}' has not implemented `sparse_dense_matmul`.".format(self.name)
         )
 
+    def sparse_csr_from_coo(self: Any, coo: Tensor, strict: bool = False) -> Tensor:
+        """
+        Transform a COO sparse matrix into a CSR sparse matrix.
+
+        :param coo: a COO matrix
+        :type coo: Tensor
+        :param strict: whether to enforce the transform, defaults to False;
+            if False, the COO matrix is returned unchanged when the backend
+            has no CSR implementation.
+        :type strict: bool, optional
+        :return: a CSR matrix
+        :rtype: Tensor
+        """
+        if strict:
+            raise NotImplementedError(
+                "Backend '{}' has not implemented `sparse_csr_from_coo`.".format(
+                    self.name
+                )
+            )
+        return coo
+
     def to_dense(self: Any, sp_a: Tensor) -> Tensor:
         """
         Convert a sparse matrix to dense tensor.
@@ -1400,10 +1435,37 @@ class ExtendedBackend:
             carry = f(carry, x)
 
         return carry
-        # carry = init
-        # for x in xs:
-        #     carry = f(carry, x)
-        # return carry
+
+    def jaxy_scan(
+        self: Any, f: Callable[[Tensor, Tensor], Tensor], init: Tensor, xs: Tensor
+    ) -> Tensor:
+        """
+        Scan following the jax ``lax.scan`` style: ``f`` consumes a carry and a
+        slice of ``xs`` and returns the new carry together with a per-step output.
+        This generic fallback uses a plain Python for loop (used e.g. by the TF backend).
+
+        :param f: step function mapping (carry, x) to (new_carry, y)
+        :type f: Callable[[Tensor, Tensor], Tensor]
+        :param init: the initial carry
+        :type init: Tensor
+        :param xs: the stacked inputs scanned over the leading axis
+        :type xs: Tensor
+        :raises ValueError: if ``xs`` is None
+        :return: the final carry and the stacked per-step outputs
+        :rtype: Tensor
+        """
+        if xs is None:
+            raise ValueError("Either xs or length must be provided.")
+        length = len(xs)
+        carry, outputs_to_stack = init, []
+        for i in range(length):
+            if isinstance(xs, (tuple, list)):
+                x = [ele[i] for ele in xs]
+            else:
+                x = xs[i]
+            new_carry, y = f(carry, x)
+            carry = new_carry
+            outputs_to_stack.append(y)
+        return carry, self.stack(outputs_to_stack)
 
     def stop_gradient(self: Any, a: Tensor) -> Tensor:
         """
tensorcircuit/backends/jax_backend.py CHANGED
@@ -418,6 +418,11 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend):  # type: ignore
     def solve(self, A: Tensor, b: Tensor, assume_a: str = "gen") -> Tensor:  # type: ignore
         return jsp.linalg.solve(A, b, assume_a=assume_a)
 
+    def special_jv(self, v: int, z: Tensor, M: int) -> Tensor:
+        from .jax_ops import bessel_jv_jax_rescaled
+
+        return bessel_jv_jax_rescaled(v, z, M)
+
     def searchsorted(self, a: Tensor, v: Tensor, side: str = "left") -> Tensor:
         if not self.is_tensor(a):
             a = self.convert_to_tensor(a)
@@ -615,6 +620,11 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend):  # type: ignore
         carry, _ = libjax.lax.scan(f_jax, init, xs)
         return carry
 
+    def jaxy_scan(
+        self, f: Callable[[Tensor, Tensor], Tensor], init: Tensor, xs: Tensor
+    ) -> Tensor:
+        return libjax.lax.scan(f, init, xs)
+
     def scatter(self, operand: Tensor, indices: Tensor, updates: Tensor) -> Tensor:
         # updates = jnp.reshape(updates, indices.shape)
         # return operand.at[indices].set(updates)
@@ -639,11 +649,20 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend):  # type: ignore
     ) -> Tensor:
         return sp_a @ b
 
+    def sparse_csr_from_coo(self, coo: Tensor, strict: bool = False) -> Tensor:
+        try:
+            return sparse.BCSR.from_bcoo(coo)  # type: ignore
+        except AttributeError as e:
+            if not strict:
+                return coo
+            else:
+                raise e
+
     def to_dense(self, sp_a: Tensor) -> Tensor:
         return sp_a.todense()
 
     def is_sparse(self, a: Tensor) -> bool:
-        return isinstance(a, sparse.BCOO)  # type: ignore
+        return isinstance(a, sparse.JAXSparse)  # type: ignore
 
     def device(self, a: Tensor) -> str:
         (dev,) = a.devices()
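A hedged round-trip sketch for the new conversion on the jax backend, mirroring the test added in tests/test_backends.py below (`BCSR.from_bcoo` needs a recent jax; older versions fall back to returning the BCOO input):

```python
# Sketch: convert a backend COO matrix to CSR and check the dense forms agree.
import numpy as np
import tensorcircuit as tc

tc.set_backend("jax")
indices = tc.backend.convert_to_tensor(np.array([[0, 0], [1, 2]]))
values = tc.backend.convert_to_tensor(np.array([1.0, 2.0]))
coo = tc.backend.coo_sparse_matrix(indices, values, shape=[3, 3])
csr = tc.backend.sparse_csr_from_coo(coo)  # returns the BCOO input on older jax
np.testing.assert_allclose(tc.backend.to_dense(coo), tc.backend.to_dense(csr))
```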
tensorcircuit/backends/jax_ops.py CHANGED
@@ -3,8 +3,11 @@ Customized ops for ML framework
 """
 
 # pylint: disable=invalid-name
+# pylint: disable=unused-variable
+
 
 from typing import Any, Tuple, Sequence
+from functools import partial
 
 import jax
 import jax.numpy as jnp
@@ -174,3 +177,108 @@ def jaxeigh_bwd(r: Array, tangents: Array) -> Array:
 
 adaware_eigh.defvjp(jaxeigh_fwd, jaxeigh_bwd)
 adaware_eigh_jit = jax.jit(adaware_eigh)
+
+
+@partial(jax.jit, static_argnums=[0, 2])
+def bessel_jv_jax_rescaled(k: int, x: jnp.ndarray, M: int) -> jnp.ndarray:
+    """
+    Computes the Bessel functions J_v using Miller's algorithm with dynamic
+    rescaling, implemented in JAX.
+    """
+    if M <= k:
+        raise ValueError(
+            f"Recurrence length M ({M}) must be greater than the required order k ({k})."
+        )
+
+    # Delegate to the scalar implementation; vmap it externally for array inputs.
+    return _bessel_jv_scalar_rescaled(k, M, x)
+
+
+def _bessel_jv_scalar_rescaled(k: int, M: int, x_val: jnp.ndarray) -> jnp.ndarray:
+    """
+    JAX implementation for a scalar input x_val.
+    This function can be vmapped for array inputs.
+    """
+    rescale_threshold = 1e250
+
+    # Define the body of the backward recurrence loop
+    def recurrence_body(i, state):  # type: ignore
+        # The original numpy loop ran m over range(M, 0, -1). With
+        # jax.lax.fori_loop(start, stop, body, init) iterating i from 0 to M-1
+        # (start inclusive, stop exclusive), mapping m_val = M - i makes m_val
+        # go from M down to 1, i.e. M iterations in total.
+        m_val = M - i
+        f_m, f_m_p1, f_vals = state
+
+        # If x_val is near zero this division could be an issue, but the
+        # outer lax.cond handles the x_val-near-zero case before this loop runs.
+        f_m_m1 = (2.0 * m_val / x_val) * f_m - f_m_p1
+
+        # --- Rescaling Step ---
+        # jax.lax.cond requires all branches to return the exact same type and shape.
+        def rescale_branch(vals):  # type: ignore
+            f_m_val, f_m_p1_val, f_vals_arr = vals
+            scale_factor = f_m_m1
+            # Return new f_m, f_m_p1, updated f_vals_arr, and the new f_m_m1 value (which is 1.0)
+            return (
+                f_m_val / scale_factor,
+                f_m_p1_val / scale_factor,
+                f_vals_arr / scale_factor,
+                1.0,
+            )
+
+        def no_rescale_branch(vals):  # type: ignore
+            f_m_val, f_m_p1_val, f_vals_arr = (
+                vals  # Unpack to keep signatures consistent
+            )
+            # Return original f_m, f_m_p1, original f_vals_arr, and the computed f_m_m1
+            return (f_m_val, f_m_p1_val, f_vals_arr, f_m_m1)
+
+        f_m_rescaled, f_m_p1_rescaled, f_vals_rescaled, f_m_m1_effective = jax.lax.cond(
+            jnp.abs(f_m_m1) > rescale_threshold,
+            rescale_branch,
+            no_rescale_branch,
+            (f_m, f_m_p1, f_vals),  # Arguments passed to branches
+        )
+
+        # Update f_vals at index m_val - 1. JAX uses .at[idx].set(val) for non-in-place updates.
+        f_vals_updated = f_vals_rescaled.at[m_val - 1].set(f_m_m1_effective)
+
+        # Return new state for the next iteration: (new f_m, new f_m_p1, updated f_vals)
+        return (f_m_m1_effective, f_m_rescaled, f_vals_updated)
+
+    # Initial state for the recurrence loop
+    f_m_p1_init = 0.0
+    f_m_init = 1e-30  # Start with a very small number
+    f_vals_init = jnp.zeros(M + 1).at[M].set(f_m_init)
+
+    # Use jax.lax.fori_loop for the backward recurrence (i from 0 to M-1).
+    # We only need the final f_m and f_vals, not the final f_m_p1.
+    final_f_m, _, f_vals = jax.lax.fori_loop(
+        0, M, recurrence_body, (f_m_init, f_m_p1_init, f_vals_init)
+    )
+
+    # Normalization using Neumann's sum rule
+    even_sum = jnp.sum(f_vals[2::2])
+    norm_const = f_vals[0] + 2.0 * even_sum
+
+    # Handle division by near-zero normalization constant
+    norm_const_safe = jnp.where(jnp.abs(norm_const) < 1e-12, 1e-12, norm_const)
+
+    # Conditional logic for x_val close to zero
+    def x_is_zero_case() -> jnp.ndarray:
+        # For x=0, J_0(0)=1 and J_k(0)=0 for k>0
+        return jnp.zeros(k).at[0].set(1.0)
+
+    def x_is_not_zero_case() -> jnp.ndarray:
+        return f_vals[:k] / norm_const_safe  # type: ignore
+
+    # Use lax.cond to select between the two cases based on x_val
+    return jax.lax.cond(jnp.abs(x_val) < 1e-12, x_is_zero_case, x_is_not_zero_case)  # type: ignore
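For context, the loop above is Miller's backward algorithm: an arbitrary small seed is propagated downward through the three-term recurrence, overflow is deferred by rescaling, and the result is fixed at the end by a normalization identity:

```latex
% backward recurrence evaluated in recurrence_body
J_{m-1}(x) = \frac{2m}{x} J_m(x) - J_{m+1}(x)
% Neumann normalization applied after the loop
J_0(x) + 2 \sum_{k=1}^{\infty} J_{2k}(x) = 1
```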
tensorcircuit/backends/numpy_backend.py CHANGED
@@ -17,7 +17,7 @@ except ImportError:  # np2.0 compatibility
 
 import tensornetwork
 from scipy.linalg import expm, solve, schur
-from scipy.special import softmax, expit
+from scipy.special import softmax, expit, jv
 from scipy.sparse import coo_matrix, issparse
 from tensornetwork.backends.numpy import numpy_backend
 from .abstract_backend import ExtendedBackend
@@ -245,6 +245,9 @@ class NumpyBackend(numpy_backend.NumPyBackend, ExtendedBackend):  # type: ignore
         # https://stackoverflow.com/questions/44672029/difference-between-numpy-linalg-solve-and-numpy-linalg-lu-solve/44710451
         return solve(A, b, assume_a=assume_a)
 
+    def special_jv(self, v: int, z: Tensor, M: int) -> Tensor:
+        # M is unused here: scipy evaluates the orders directly
+        return jv(np.arange(v), z)
+
     def searchsorted(self, a: Tensor, v: Tensor, side: str = "left") -> Tensor:
         return np.searchsorted(a, v, side=side)  # type: ignore
 
@@ -330,6 +333,9 @@ class NumpyBackend(numpy_backend.NumPyBackend, ExtendedBackend):  # type: ignore
     ) -> Tensor:
         return sp_a @ b
 
+    def sparse_csr_from_coo(self, coo: Tensor, strict: bool = False) -> Tensor:
+        return coo.tocsr()
+
     def to_dense(self, sp_a: Tensor) -> Tensor:
         return sp_a.todense()
 
tensorcircuit/backends/tensorflow_backend.py CHANGED
@@ -360,6 +360,38 @@ tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.rq = _rq_
 tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd = _svd_tf
 
 
+def sparse_tensor_matmul(self: Tensor, other: Tensor) -> Tensor:
+    """
+    An implementation of matrix multiplication (@) for tf.SparseTensor.
+
+    This function is designed to be monkey-patched onto the tf.SparseTensor class.
+    It handles multiplication with a dense vector (rank-1 Tensor) by temporarily
+    promoting it to a matrix (rank-2 Tensor) for the underlying TensorFlow call.
+    """
+    # Ensure the 'other' tensor is of a compatible dtype
+    if not other.dtype.is_compatible_with(self.dtype):
+        other = tf.cast(other, self.dtype)
+
+    # tf.sparse.sparse_dense_matmul requires the dense tensor to be a 2D matrix,
+    # so a 1D vector needs to be reshaped first.
+    is_vector = len(other.shape) == 1
+
+    if is_vector:
+        # Promote the vector to a column matrix [N] -> [N, 1]
+        other_matrix = tf.expand_dims(other, axis=1)
+    else:
+        other_matrix = other
+
+    # Perform the actual multiplication
+    result_matrix = tf.sparse.sparse_dense_matmul(self, other_matrix)
+
+    if is_vector:
+        # Demote the result matrix back to a vector [M, 1] -> [M]
+        return tf.squeeze(result_matrix, axis=1)
+    else:
+        return result_matrix
+
+
 class TensorFlowBackend(tensorflow_backend.TensorFlowBackend, ExtendedBackend):  # type: ignore
     """
     See the original backend API at `tensorflow backend
@@ -378,6 +410,8 @@ class TensorFlowBackend(tensorflow_backend.TensorFlowBackend, ExtendedBackend):
         )
         tf = tensorflow
         tf.sparse.SparseTensor.__add__ = tf.sparse.add
+        tf.SparseTensor.__matmul__ = sparse_tensor_matmul
+
         self.minor = int(tf.__version__.split(".")[1])
         self.name = "tensorflow"
         logger = tf.get_logger()  # .setLevel('ERROR')
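A short sketch of the patched operator in action (float32 dtypes are illustrative; the behavior follows the new test in tests/test_backends.py below):

```python
# After the TensorFlow backend is constructed, tf.SparseTensor supports `@`
# with 1D vectors (promoted to [N, 1] internally) as well as 2D matrices.
import numpy as np
import tensorflow as tf
import tensorcircuit as tc

tc.set_backend("tensorflow")  # installs tf.SparseTensor.__matmul__
sp = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
v = tf.constant([3.0, 4.0])
np.testing.assert_allclose(np.asarray(sp @ v), [3.0, 8.0])  # 1D in, 1D out
```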
tensorcircuit/fgs.py CHANGED
@@ -28,7 +28,6 @@ def onehot_matrix(i: int, j: int, N: int) -> Tensor:
 
 # TODO(@refraction-ray): efficiency benchmark with jit
 # TODO(@refraction-ray): FGS mixed state support?
-# TODO(@refraction-ray): overlap?
 # TODO(@refraction-ray): fermionic logarithmic negativity
 
 
@@ -227,7 +226,7 @@ class FGSSimulator:
         return self.alpha
 
     def get_cmatrix(self, now_i: bool = True, now_j: bool = True) -> Tensor:
-        """
+        r"""
         Calculates the correlation matrix.
 
         The correlation matrix is defined as :math:`C_{ij} = \langle c_i^\dagger c_j \rangle`.
@@ -509,7 +508,7 @@ class FGSSimulator:
 
     @staticmethod
    def hopping(chi: Tensor, i: int, j: int, L: int) -> Tensor:
-        """
+        r"""
         Constructs the hopping Hamiltonian between two sites.
 
         The hopping Hamiltonian is given by :math:`\chi c_i^\dagger c_j + h.c.`.
@@ -550,7 +549,7 @@ class FGSSimulator:
 
     @staticmethod
     def chemical_potential(chi: Tensor, i: int, L: int) -> Tensor:
-        """
+        r"""
         Constructs the chemical potential Hamiltonian for a single site.
 
         The chemical potential Hamiltonian is given by :math:`\chi c_i^\dagger c_i`.
@@ -572,7 +571,7 @@ class FGSSimulator:
 
     @staticmethod
     def sc_pairing(chi: Tensor, i: int, j: int, L: int) -> Tensor:
-        """
+        r"""
         Constructs the superconducting pairing Hamiltonian between two sites.
 
         The superconducting pairing Hamiltonian is given by :math:`\chi c_i^\dagger c_j^\dagger + h.c.`.
@@ -637,7 +636,7 @@ class FGSSimulator:
         self.evol_ihamiltonian(self.chemical_potential(chi, i, self.L))
 
     def get_bogoliubov_uv(self) -> Tuple[Tensor, Tensor]:
-        """
+        r"""
         Returns the u and v matrices of the Bogoliubov transformation.
 
         The Bogoliubov transformation is defined as:
tensorcircuit/quantum.py CHANGED
@@ -1433,7 +1433,7 @@ def PauliStringSum2Dense(
         return sparsem.todense()
     sparsem = backend.coo_sparse_matrix_from_numpy(sparsem)
     densem = backend.to_dense(sparsem)
-    return densem
+    return backend.convert_to_tensor(densem)
 
 
 # already implemented as backend method
tensorcircuit/stabilizercircuit.py CHANGED
@@ -96,10 +96,12 @@ class StabilizerCircuit(AbstractCircuit):
 
         if name.lower() in self.gate_map:
             # self._stim_circuit.append(gate_map[name.lower()], list(index))
-            instruction = f"{self.gate_map[name.lower()]} {' '.join(map(str, index))}"
+            gn = self.gate_map[name.lower()]
+            instruction = f"{gn} {' '.join(map(str, index))}"
             self._stim_circuit.append_from_stim_program_text(instruction)
             # append is much slower
-            self.current_sim.do(stim.Circuit(instruction))
+            # self.current_sim.do(stim.Circuit(instruction))
+            getattr(self.current_sim, gn.lower())(*index)
         else:
             raise ValueError(f"Gate {name} is not supported in stabilizer simulation")
 
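The replacement line dispatches to the simulator's native gate methods instead of parsing a stim program string per gate; a hedged sketch of the equivalence, assuming stim's TableauSimulator API:

```python
# The two dispatch styles agree; the direct call skips string parsing.
import stim

sim = stim.TableauSimulator()
sim.h(0)                          # direct method, as in getattr(sim, gn.lower())(*index)
sim.do(stim.Circuit("CNOT 0 1"))  # string-parsing path that was commented out
```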
tensorcircuit/templates/lattice.py CHANGED
@@ -15,6 +15,7 @@ from typing import (
     Union,
     TYPE_CHECKING,
     cast,
+    Set,
 )
 
 logger = logging.getLogger(__name__)
@@ -1446,3 +1447,54 @@ class CustomizeLattice(AbstractLattice):
         logger.info(
             f"{len(ids_to_remove)} sites removed. Lattice now has {self.num_sites} sites."
         )
+
+
+def get_compatible_layers(bonds: List[Tuple[int, int]]) -> List[List[Tuple[int, int]]]:
+    """
+    Partitions a list of pairs (bonds) into compatible layers for parallel
+    gate application using a greedy edge-coloring algorithm.
+
+    This function takes a list of pairs, representing connections like
+    nearest-neighbor (NN) or next-nearest-neighbor (NNN) bonds, and
+    partitions them into a small number of sets ("layers") in which no two
+    pairs share an index. This is a general utility for scheduling
+    non-overlapping operations.
+
+    :Example:
+
+    >>> from tensorcircuit.templates.lattice import SquareLattice
+    >>> sq_lattice = SquareLattice(size=(2, 2), pbc=False)
+    >>> nn_bonds = sq_lattice.get_neighbor_pairs(k=1, unique=True)
+
+    >>> gate_layers = get_compatible_layers(nn_bonds)
+    >>> print(gate_layers)
+    [[(0, 1), (2, 3)], [(0, 2), (1, 3)]]
+
+    :param bonds: A list of tuples, where each tuple represents a bond (i, j)
+        of site indices to be scheduled.
+    :type bonds: List[Tuple[int, int]]
+    :return: A list of layers. Each layer is a list of tuples, where each
+        tuple represents a bond. All bonds within a layer are non-overlapping.
+    :rtype: List[List[Tuple[int, int]]]
+    """
+    uncolored_edges: Set[Tuple[int, int]] = {(min(bond), max(bond)) for bond in bonds}
+
+    layers: List[List[Tuple[int, int]]] = []
+
+    while uncolored_edges:
+        current_layer: List[Tuple[int, int]] = []
+        qubits_in_this_layer: Set[int] = set()
+
+        edges_to_process = sorted(list(uncolored_edges))
+
+        for edge in edges_to_process:
+            i, j = edge
+            if i not in qubits_in_this_layer and j not in qubits_in_this_layer:
+                current_layer.append(edge)
+                qubits_in_this_layer.add(i)
+                qubits_in_this_layer.add(j)
+
+        uncolored_edges -= set(current_layer)
+        layers.append(sorted(current_layer))
+
+    return layers
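A sketch of the intended consumer pattern for these layers (the rzz gate is just an illustrative two-qubit gate; the loop structure is the point):

```python
# Schedule commuting two-qubit gates layer by layer; within a layer no qubit
# is touched twice, so the gates of a layer can be applied in parallel.
import tensorcircuit as tc
from tensorcircuit.templates.lattice import SquareLattice, get_compatible_layers

lattice = SquareLattice(size=(2, 2), pbc=False)
bonds = lattice.get_neighbor_pairs(k=1, unique=True)
c = tc.Circuit(lattice.num_sites)
for layer in get_compatible_layers(bonds):
    for i, j in layer:
        c.rzz(i, j, theta=0.1)
```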
tensorcircuit/timeevol.py CHANGED
@@ -32,6 +32,8 @@ def lanczos_iteration_scan(
     :rtype: Tuple[Tensor, Tensor]
     """
     state_size = backend.shape_tuple(initial_vector)[0]
+    if backend.is_sparse(hamiltonian):
+        hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
 
     # Main scan body for the outer loop (iterating j)
     def lanczos_step(carry: Tuple[Any, ...], j: int) -> Tuple[Any, ...]:
@@ -196,6 +198,9 @@ def lanczos_iteration(
     # Add first basis vector
     basis_vectors.append(vector)
 
+    if backend.is_sparse(hamiltonian):
+        hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
+
     # Lanczos iteration (fixed number of iterations for JIT compatibility)
     for j in range(subspace_dimension):
         # Calculate H|v_j>
@@ -592,3 +597,215 @@ def ode_evol_global(
     if not callback:
         return s1
     return backend.stack([callback(s1[i]) for i in range(len(s1))])
+
+
+def chebyshev_evol(
+    hamiltonian: Any,
+    initial_state: Tensor,
+    t: float,
+    spectral_bounds: Tuple[float, float],
+    k: int,
+    M: int,
+) -> Any:
+    """
+    Chebyshev evolution method, expanding the time-evolution exponential
+    operator in a Chebyshev series.
+    Note the returned state is not normalized, but its norm should be very
+    close to 1 for sufficiently large k and M, which can serve as an accuracy
+    check of the final result.
+
+    :param hamiltonian: Hamiltonian matrix (sparse or dense)
+    :type hamiltonian: Any
+    :param initial_state: Initial state vector
+    :type initial_state: Tensor
+    :param t: Time to evolve
+    :type t: float
+    :param spectral_bounds: Spectral bounds for the Hamiltonian (Emax, Emin)
+    :type spectral_bounds: Tuple[float, float]
+    :param k: Number of Chebyshev coefficients; a good estimate is k > t*(Emax-Emin)/2
+    :type k: int
+    :param M: Number of iterations to estimate the Bessel function; a good estimate
+        is given by the `estimate_M` helper method.
+    :type M: int
+    :return: Evolved state
+    :rtype: Tensor
+    """
+    # TODO(@refraction-ray): no support for tf backend as the Bessel function has no implementation
+    E_max, E_min = spectral_bounds
+    if E_max <= E_min:
+        raise ValueError("E_max must be > E_min.")
+
+    a = (E_max - E_min) / 2.0
+    b = (E_max + E_min) / 2.0
+    tau = a * t  # Rescaled time parameter
+
+    if backend.is_sparse(hamiltonian):
+        hamiltonian = backend.sparse_csr_from_coo(hamiltonian)
+
+    def apply_h_norm(psi: Any) -> Any:
+        """Applies the normalized Hamiltonian to a state."""
+        return ((hamiltonian @ psi) - b * psi) / a
+
+    # Handle edge case where no evolution is needed.
+    if k == 0:
+        # The phase factor still applies even for zero evolution of the series part.
+        phase = backend.exp(-1j * b * t)
+        return phase * backend.zeros_like(initial_state)
+
+    # --- 2. Calculate Chebyshev Expansion Coefficients ---
+    k_indices = backend.arange(k)
+    bessel_vals = backend.special_jv(k, tau, M)
+
+    # Prefactor is 1 for k=0 and 2 for k>0.
+    prefactor = backend.ones([k])
+    if k > 1:
+        # Using concat for backend compatibility (vs. jax's .at[1:].set(2.0))
+        prefactor = backend.concat(
+            [backend.ones([1]), backend.ones([k - 1]) * 2.0], axis=0
+        )
+
+    ik_powers = backend.power(0 - 1j, k_indices)
+    coeffs = prefactor * ik_powers * bessel_vals
+
+    # --- 3. Iteratively build the result using a scan ---
+
+    # Handle the simple case of k=1 separately.
+    if k == 1:
+        psi_unphased = coeffs[0] * initial_state
+    else:  # k >= 2, use the scan operation.
+        # Initialize the first two Chebyshev vectors and the initial sum.
+        T0 = initial_state
+        T1 = apply_h_norm(T0)
+        initial_sum = coeffs[0] * T0 + coeffs[1] * T1
+
+        # The carry for the scan holds the state needed for the next iteration:
+        # (current vector T_k, previous vector T_{k-1}, and the running sum).
+        initial_carry = (T1, T0, initial_sum)
+
+        def scan_body(carry, i):  # type: ignore
+            """The body of the scan operation."""
+            Tk, Tkm1, current_sum = carry
+
+            # Calculate the next Chebyshev vector using the recurrence relation.
+            Tkp1 = 2 * apply_h_norm(Tk) - Tkm1
+
+            # Add its contribution to the running sum.
+            new_sum = current_sum + coeffs[i] * Tkp1
+
+            # Return the updated carry for the next step. No intermediate output is needed.
+            return (Tkp1, Tk, new_sum)
+
+        # Run the scan over the remaining coefficients (from index 2 to k-1).
+        final_carry = backend.scan(scan_body, backend.arange(2, k), initial_carry)
+
+        # The final result is the sum accumulated in the last carry state.
+        psi_unphased = final_carry[2]
+
+    # --- 4. Final Step: Apply Phase Correction ---
+    # This undoes the energy shift from the Hamiltonian normalization.
+    phase = backend.exp(-1j * b * t)
+    psi_final = phase * psi_unphased
+
+    return psi_final
+
+
+def estimate_k(t: float, spectral_bounds: Tuple[float, float]) -> int:
+    """
+    Estimate k for the Chebyshev expansion.
+
+    :param t: time
+    :type t: float
+    :param spectral_bounds: spectral bounds (Emax, Emin)
+    :type spectral_bounds: Tuple[float, float]
+    :return: k
+    :rtype: int
+    """
+    E_max, E_min = spectral_bounds
+    a = (E_max - E_min) / 2.0
+    tau = a * t  # tau is now a scalar
+    return max(int(1.1 * tau), int(tau + 20))
+
+
+def estimate_M(t: float, spectral_bounds: Tuple[float, float], k: int) -> int:
+    """
+    Estimate M, the number of Bessel function iterations.
+
+    :param t: time
+    :type t: float
+    :param spectral_bounds: spectral bounds (Emax, Emin)
+    :type spectral_bounds: Tuple[float, float]
+    :param k: k
+    :type k: int
+    :return: M
+    :rtype: int
+    """
+    E_max, E_min = spectral_bounds
+    a = (E_max - E_min) / 2.0
+    tau = a * t  # tau is now a scalar
+    safety_factor = 15
+    M = max(k, int(abs(tau))) + int(safety_factor * np.sqrt(abs(tau)))
+    M = max(M, k + 30)
+    return M
+
+
+def estimate_spectral_bounds(
+    h: Any, n_iter: int = 30, psi0: Optional[Any] = None
+) -> Tuple[float, float]:
+    """
+    Lanczos algorithm to estimate the spectral bounds of a Hamiltonian.
+    Intended as a quick pre-run for `chebyshev_evol`; not jit-able.
+
+    :param h: Hamiltonian matrix.
+    :type h: Any
+    :param n_iter: iteration number.
+    :type n_iter: int
+    :param psi0: Optional initial state.
+    :type psi0: Optional[Any]
+    :return: (E_max, E_min)
+    """
+    shape = h.shape
+    D = shape[-1]
+    if psi0 is None:
+        psi0 = np.random.normal(size=[D])
+
+    psi0 = backend.convert_to_tensor(psi0) / backend.norm(psi0)
+    psi0 = backend.cast(psi0, dtypestr)
+
+    # Lanczos
+    alphas = []
+    betas = []
+    q_prev = backend.zeros(psi0.shape, dtype=psi0.dtype)
+    q = psi0
+    beta = 0
+
+    for _ in range(n_iter):
+        r = h @ q
+        r = backend.convert_to_tensor(r)  # in case np.matrix
+        r = backend.reshape(r, [-1])
+        if beta != 0:
+            r -= backend.cast(beta, dtypestr) * q_prev
+
+        alpha = backend.real(backend.sum(backend.conj(q) * r))
+
+        alphas.append(alpha)
+
+        r -= backend.cast(alpha, dtypestr) * q
+
+        q_prev = q
+        beta = backend.norm(r)
+        q = r / beta
+        beta = backend.abs(beta)
+        betas.append(beta)
+        if beta < 1e-8:
+            break
+
+    alphas = backend.stack(alphas)
+    betas = backend.stack(betas)
+    T = (
+        backend.diagflat(alphas)
+        + backend.diagflat(betas[:-1], k=1)
+        + backend.diagflat(betas[:-1], k=-1)
+    )
+
+    ritz_values, _ = backend.eigh(T)
+
+    return backend.max(ritz_values), backend.min(ritz_values)
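The series chebyshev_evol implements is the standard Chebyshev propagator expansion. With a = (E_max - E_min)/2, b = (E_max + E_min)/2, the rescaled Hamiltonian H_norm = (H - b)/a and τ = a·t, the code's prefactor, (-i)^k powers, Bessel values, and final phase realize:

```latex
e^{-iHt}\,|\psi\rangle = e^{-ibt} \sum_{k=0}^{K-1} (2 - \delta_{k0})\,(-i)^k\, J_k(\tau)\, T_k(H_{\mathrm{norm}})\,|\psi\rangle,
\qquad T_{k+1} = 2\,H_{\mathrm{norm}}\,T_k - T_{k-1}
```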
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tensorcircuit-nightly
-Version: 1.3.0.dev20250730
+Version: 1.3.0.dev20250801
 Summary: nightly release for tensorcircuit
 Home-page: https://github.com/refraction-ray/tensorcircuit-dev
 Author: TensorCircuit Authors
@@ -70,7 +70,7 @@ TensorCircuit-NG is the actively maintained official version and a [fully compat
 
 Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit-ng.readthedocs.io/).
 
-For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 80+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).
+For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 90+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).
 
 For beginners, please refer to [quantum computing lectures with TC-NG](https://github.com/sxzgroup/qc_lecture) to learn both quantum computing basics and representative usage of TensorCircuit-NG.
 
@@ -1,4 +1,4 @@
-tensorcircuit/__init__.py,sha256=DrH3d7oEWpI2MmrSmabmwoovMMYBNoLZeCk6SFC360M,2055
+tensorcircuit/__init__.py,sha256=IL3iNxbN-D630LW977RNmxFHWyPUPXy8CxLIq0cZ8f0,2055
 tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
 tensorcircuit/abstractcircuit.py,sha256=0osacPqq7B1EJki-cI1aLYoVRmjFaG9q3XevWMs7SsA,44125
 tensorcircuit/asciiart.py,sha256=neY1OWFwtoW5cHPNwkQHgRPktDniQvdlP9QKHkk52fM,8236
@@ -8,17 +8,17 @@ tensorcircuit/circuit.py,sha256=mE4b_9xRu3ydoB8iDffdx35V9GZLhAQD_tkjZDLnLjg,3910
 tensorcircuit/cons.py,sha256=uYKBeYKkDoJEqJTNrOZPRM31tBtkqe5aAg8GtVidJ1Y,33014
 tensorcircuit/densitymatrix.py,sha256=VqMBnWCxO5-OsOp6LOdc5RS2AzmB3U4-w40Vn_lqygo,14865
 tensorcircuit/experimental.py,sha256=TGK4FaS6TS_ZhtjcIZgYVuAkGdRW50LN0DdXp-h4bos,29906
-tensorcircuit/fgs.py,sha256=pzaZuzPIFPpfr5Z-UsBQ_Yp0x7mbSM2sUc4dO2SUmVs,49543
+tensorcircuit/fgs.py,sha256=J1TjAiiqZk9KO1xYX_V0xsgKlYZaUQ7Enm4s5zkRM50,49514
 tensorcircuit/gates.py,sha256=x-wA7adVpP7o0AQLt_xYUScFKj8tU_wUOV2mR1GyrPc,29322
 tensorcircuit/keras.py,sha256=5OF4dfhEeS8sRYglpqYtQsWPeqp7uK0i7-P-6RRJ7zQ,10126
 tensorcircuit/mps_base.py,sha256=UZ-v8vsr_rAsKrfun8prVgbXJ-qsdqKy2DZIHpq3sxo,15400
 tensorcircuit/mpscircuit.py,sha256=COO9xzvA2Whe7Ncp6OqrgtXKmahHgTHxXTELAVHzFSY,36777
 tensorcircuit/noisemodel.py,sha256=vzxpoYEZbHVC4a6g7_Jk4dxsHi4wvhpRFwud8b616Qo,11878
-tensorcircuit/quantum.py,sha256=LNkIv5cJ2KG6puC18zTuXi-5cojW1Tnz-N-WjZ0Qu5Q,90217
+tensorcircuit/quantum.py,sha256=1fZJJJ_o3NF7LaPPPU5Abd82NmMSnNQ8uUp_ClZdp3c,90244
 tensorcircuit/shadows.py,sha256=6XmWNubbuaxFNvZVWu-RXd0lN9Jkk-xwong_K8o8_KE,17014
 tensorcircuit/simplify.py,sha256=O11G3UYiVAc30GOfwXXmhLXwGZrQ8OVwLTMQMZp_XBc,9414
-tensorcircuit/stabilizercircuit.py,sha256=yNqcEKtYzRYrgqGil8QEyKN4OEMp9g6uOG2zuRaU8uc,15465
-tensorcircuit/timeevol.py,sha256=8p4C3nhUQ9eC2wYfZ9w5BGFIt25NPEJTXmZU_iZy4tM,21607
+tensorcircuit/stabilizercircuit.py,sha256=KbrBVSo2pXnf5JHIrxwRPSPTm7bJVMIcyE4d7-dIfCM,15545
+tensorcircuit/timeevol.py,sha256=GDD6IgzkRhZIyvLkSKJ7ZihnkHQBqoWOBsmBwRAC1UI,28574
 tensorcircuit/torchnn.py,sha256=z_QpM0QC3mydGyWpyp877j-tSFCPyzynCwqrTWaw-IA,4637
 tensorcircuit/translation.py,sha256=VnU7DnYmbk1cWjqa7N68WNLNDn3DwENrMzmbG4_CQco,28611
 tensorcircuit/utils.py,sha256=nEDR1wTh1WF_yV6UyZYlifqOPWdKk_Krr4HjhrWHnGQ,7228
@@ -40,15 +40,15 @@ tensorcircuit/applications/physics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQe
 tensorcircuit/applications/physics/baseline.py,sha256=RWrzMGnC0PtmpYSFkvCE7r1llR88gncXuCakAAhFE-w,1775
 tensorcircuit/applications/physics/fss.py,sha256=ny3U9ZDmT459PXjA1oUGfarBOlSKSy6fs04vD9s1XH4,3633
 tensorcircuit/backends/__init__.py,sha256=WiUmbUFzM29w3hKfhuKxVUk3PpqDFiXf4za9g0ctpZA,80
-tensorcircuit/backends/abstract_backend.py,sha256=pmuf8NFmtRXnlBTsRrv2s6weCNiq3cd9gA7wiKvIobM,59589
+tensorcircuit/backends/abstract_backend.py,sha256=fAzyKvZ-1Mw2wvbyuR5IScVGhwZjY7AxBVtRo6viVY0,61743
 tensorcircuit/backends/backend_factory.py,sha256=Z0aQ-RnxOnQzp-SRw8sefAH8XyBSlj2NXZwOlHinbfY,1713
 tensorcircuit/backends/cupy_backend.py,sha256=4vgO3lnQnsvWL5hukhskjJp37EAHqio6z6TVXTQcdjs,15077
-tensorcircuit/backends/jax_backend.py,sha256=dkDQ380CJHIdlt1fZvlN_g8DIowWPEcTTV_XBcs0YB0,26088
-tensorcircuit/backends/jax_ops.py,sha256=o7tLlQMRnaKWcr5rVnOMqwG6KZVpR8M8ryNQ-ceXVxs,4789
-tensorcircuit/backends/numpy_backend.py,sha256=PhbpXeATQ6X4kZ3xA-RvQVO_dKrpFh5vyXcCKKDMC7U,14197
+tensorcircuit/backends/jax_backend.py,sha256=GIWsJwhlz0PD_KcypRkNWcjkegdNdoFMsmo0u0RQqrk,26704
+tensorcircuit/backends/jax_ops.py,sha256=WyUGavch2R9uEFsI1Ap7eP1UcU4s2TItBgGsrVS3Hzs,9320
+tensorcircuit/backends/numpy_backend.py,sha256=hhjrm0GK0d05TBYHNCZrBBBIJQ7V6qN99m1KLl0WKro,14408
 tensorcircuit/backends/pytorch_backend.py,sha256=yhfZSrm99yNW-dmijk8t6zAkbVgLRd4b_aIWKrpT7bY,24230
 tensorcircuit/backends/pytorch_ops.py,sha256=lLxpK6OqfpVwifyFlgsqhpnt-oIn4R5paPMVg51WaW0,3826
-tensorcircuit/backends/tensorflow_backend.py,sha256=T2BmFxOyl2QU4dSIwUMCLyPspLrNFLx3hVfD7TD1No0,36598
+tensorcircuit/backends/tensorflow_backend.py,sha256=CQhdWjUoqCNHxmJgfYOhUVpwDqjSo0RYrmsiaVWmxCU,37842
 tensorcircuit/backends/tf_ops.py,sha256=FJwDU7LhZrt0VUIx12DJU0gZnWhMv7B7r9sAKG710As,3378
 tensorcircuit/cloud/__init__.py,sha256=n0Lx07GYF6YbdIa6AJCLJk4zlAm5CqaeHszvkxxuoI4,139
 tensorcircuit/cloud/abstraction.py,sha256=6aSxbz0MP21jBVdFbSMrvJPLQH117vGz9sSHbMFoodE,14582
@@ -84,12 +84,12 @@ tensorcircuit/templates/conversions.py,sha256=D3chiKDr7G1ekCJngiol91k9iqrMag1DZQ
 tensorcircuit/templates/dataset.py,sha256=ldPvCUlwjHU_S98E2ISQp34KqJzJPpPHmDIKJ4K-qYo,1933
 tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA4qd4,3935
 tensorcircuit/templates/hamiltonians.py,sha256=Ag8djD6lckTeU7I99gCbXiQAb2VYqzm_p7-hpXo-5u4,5554
-tensorcircuit/templates/lattice.py,sha256=F35ebANk0DSmSHLR0-Q_hUbcznyCmZjb4fKmvCMywmA,58575
+tensorcircuit/templates/lattice.py,sha256=P64OGUedE3o8vWekhM8XAs5nUe5CdG-gojLlTGA20TI,60534
 tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
-tensorcircuit_nightly-1.3.0.dev20250730.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+tensorcircuit_nightly-1.3.0.dev20250801.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/conftest.py,sha256=J9nHlLE3Zspz1rMyzadEuBWhaS5I4Q9sq0lnWybcdIA,1457
-tests/test_backends.py,sha256=rClxb2gyAoGeXd_ZYVSAJ0zEvJ7z_2btAeFM_Iy_wwY,33925
+tests/test_backends.py,sha256=MLxpRWavWF0qWcjVI61lIa3CYGbztvTO8ITFOYX47ao,38312
 tests/test_calibrating.py,sha256=D1Tlv8mucUhg3ULvB5QlYyaDfw7aEERwq69-aGSb1A4,3805
 tests/test_channels.py,sha256=BL4CirU8ku9-_NrI6PZAS5xZ0wrL1UEC1S3wPI9dYQM,12628
 tests/test_circuit.py,sha256=IsSIFEs7hUCSYexMb-ESt1ZUpztHtLA0qz0CZolGdc4,52240
@@ -102,7 +102,7 @@ tests/test_gates.py,sha256=rAIV2QFpFsA5bT1QivTSkhdarvwu5t0N3IOz4SEDrzg,4593
 tests/test_hamiltonians.py,sha256=E0E5ABhUeG7XLMLRkb3AIAPi7aJgnIeMWTgqzF1Q6yc,5724
 tests/test_interfaces.py,sha256=iJPmes8S8HkA9_PGjsu4Ike-vCXYyS1EMgnNKKXDNaU,16938
 tests/test_keras.py,sha256=U453jukavmx0RMeTSDEgPzrNdHNEfK1CW0CqO3XCNKo,4841
-tests/test_lattice.py,sha256=_ptDVK3EhS-X5fCQWiP8sHk3azdyGFuwqg6KMkBTkDE,65789
+tests/test_lattice.py,sha256=DJoQ3Dr6uAHrdaKofEApc2LD8FgjYAH_a3Ux0cIkgO8,68917
 tests/test_miscs.py,sha256=4fXKsW0kYu2JYO0iGlwWLAYlkFD1rfeVc4xG4Zjn5FQ,8935
 tests/test_mpscircuit.py,sha256=mDXX8oQeFeHr_PdZvwqyDs_tVcVAqLmCERqlTAU7590,10552
 tests/test_noisemodel.py,sha256=UYoMtCjwDaB-CCn5kLosofz-qTMiY4KGAFBjVtqqLPE,5637
@@ -115,10 +115,10 @@ tests/test_shadows.py,sha256=1T3kJesVJ5XfZrSncL80xdq-taGCSnTDF3eL15UlavY,5160
 tests/test_simplify.py,sha256=35tbOu1QANsPvY1buLwNhqPnMkBOsnBtHn82qaukmgI,1175
 tests/test_stabilizer.py,sha256=MivuZ5pY7GOcEPTanhtrflXostyLBToHyjfPqCU0tG0,5450
 tests/test_templates.py,sha256=Xm9otFFaaBWG9TZpgJ-nNh9MBfRipTzFWL8fBOnie2k,7192
-tests/test_timeevol.py,sha256=N2x5BjmOwDStQ1sd0mkdENt7Y_MNES6_3JyMyOF43Iw,14780
+tests/test_timeevol.py,sha256=zz17x21C-5f8ZvcgkXm30JzLgZMhsKaOCzyHCyS43h0,20333
 tests/test_torchnn.py,sha256=CHLTfWkF7Ses5_XnGFN_uv_JddfgenFEFzaDtSH8XYU,2848
 tests/test_van.py,sha256=kAWz860ivlb5zAJuYpzuBe27qccT-Yf0jatf5uXtTo4,3163
-tensorcircuit_nightly-1.3.0.dev20250730.dist-info/METADATA,sha256=o5DEwWjsxTEMdi4Rxhl8KHljen9REwS9D_MtlG8SFdU,34922
-tensorcircuit_nightly-1.3.0.dev20250730.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tensorcircuit_nightly-1.3.0.dev20250730.dist-info/top_level.txt,sha256=O_Iqeh2x02lasEYMI9iyPNNNtMzcpg5qvwMOkZQ7n4A,20
-tensorcircuit_nightly-1.3.0.dev20250730.dist-info/RECORD,,
+tensorcircuit_nightly-1.3.0.dev20250801.dist-info/METADATA,sha256=nHOy6uHlzXTlcH8Iq1t8lJKer9bJ0bTrkamRBgO2l1Q,34922
+tensorcircuit_nightly-1.3.0.dev20250801.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tensorcircuit_nightly-1.3.0.dev20250801.dist-info/top_level.txt,sha256=O_Iqeh2x02lasEYMI9iyPNNNtMzcpg5qvwMOkZQ7n4A,20
+tensorcircuit_nightly-1.3.0.dev20250801.dist-info/RECORD,,
tests/test_backends.py CHANGED
@@ -9,6 +9,7 @@ os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
 import numpy as np
 import pytest
 from pytest_lazyfixture import lazy_fixture as lf
+import scipy
 import tensorflow as tf
 
 thisfile = os.path.abspath(__file__)
@@ -60,6 +61,126 @@ def test_grad_torch(torchb):
     np.testing.assert_allclose(f(a), np.ones([2]), atol=1e-5)
 
 
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
+def test_sparse_csr_from_coo(backend):
+    # Create a sparse matrix in COO format
+    values = tc.backend.convert_to_tensor(np.array([1.0, 2.0, 3.0]))
+    values = tc.backend.cast(values, "complex64")
+    indices = tc.backend.convert_to_tensor(np.array([[0, 0], [1, 1], [2, 3]]))
+    indices = tc.backend.cast(indices, "int64")
+    coo_matrix = tc.backend.coo_sparse_matrix(indices, values, shape=[4, 4])
+
+    # Convert COO to CSR
+    csr_matrix = tc.backend.sparse_csr_from_coo(coo_matrix)
+
+    # Check that the result is still recognized as sparse
+    assert tc.backend.is_sparse(csr_matrix) is True
+
+    # Check that the conversion preserves values by comparing dense representations
+    coo_dense = tc.backend.to_dense(coo_matrix)
+    csr_dense = tc.backend.to_dense(csr_matrix)
+    np.testing.assert_allclose(coo_dense, csr_dense, atol=1e-5)
+
+
+def test_sparse_tensor_matmul_monkey_patch(tfb):
+    """
+    Test the monkey-patched __matmul__ method for tf.SparseTensor.
+    This test specifically targets the line:
+    tf.SparseTensor.__matmul__ = sparse_tensor_matmul
+    """
+    # Create a sparse matrix in COO format
+    indices = tf.constant([[0, 0], [1, 1], [2, 3]], dtype=tf.int64)
+    values = tf.constant([1.0, 2.0, 3.0], dtype=tf.complex64)
+    shape = [4, 4]
+    sparse_matrix = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
+
+    # Test 1: Matrix-vector multiplication with 1D vector
+    vector_1d = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.complex64)
+    result_1d = sparse_matrix @ vector_1d  # Using the monkey-patched @ operator
+
+    expected_1d = tf.constant([1.0, 4.0, 12.0, 0.0], dtype=tf.complex64)
+
+    np.testing.assert_allclose(result_1d, expected_1d, atol=1e-6)
+    vector_1d = tc.backend.reshape(vector_1d, [4, 1])
+    result_1dn = sparse_matrix @ vector_1d  # Using the monkey-patched @ operator
+    expected_1d = tc.backend.reshape(expected_1d, [4, 1])
+
+    np.testing.assert_allclose(result_1dn, expected_1d, atol=1e-6)
+
+    # Test 2: Matrix-matrix multiplication with 2D matrix
+    matrix_2d = tf.constant(
+        [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]], dtype=tf.complex64
+    )
+    result_2d = sparse_matrix @ matrix_2d  # Using the monkey-patched @ operator
+
+    expected_2d = tf.sparse.sparse_dense_matmul(sparse_matrix, matrix_2d)
+
+    np.testing.assert_allclose(result_2d.numpy(), expected_2d.numpy(), atol=1e-6)
+
+    # Test 3: Verify that the operation is consistent with sparse_dense_matmul
+
+    reference_result = tc.backend.sparse_dense_matmul(sparse_matrix, vector_1d)
+    reference_result_squeezed = tc.backend.reshape(reference_result, [-1])
+
+    np.testing.assert_allclose(result_1d, reference_result_squeezed, atol=1e-6)
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("jaxb")])
+def test_backend_jv(backend, highp):
+    def calculate_M(k, x_val):
+        safety_factor = 15
+        M = max(k, int(abs(x_val))) + int(safety_factor * np.sqrt(abs(x_val)))
+        M = max(M, k + 30)
+        return M
+
+    k_values = [5, 20, 50, 200, 500, 3000]
+    x_values = [0.0, 0.1, 1.0, 10.0, 100, 1000, 6000]
+    for k in k_values:
+        for x_val in x_values:
+            M = calculate_M(k, x_val)
+            f_vals = tc.backend.special_jv(k, x_val, M)
+            np.testing.assert_allclose(
+                f_vals, scipy.special.jv(np.arange(k), x_val), atol=1e-6
+            )
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("jaxb")])
+def test_backend_jaxy_scan(backend):
+    def body_fun(carry, x):
+        counter, decrementor = carry
+
+        # update the state
+        new_counter = counter + 1
+        new_decrementor = decrementor - 1
+        new_carry = (new_counter, new_decrementor)
+
+        y = counter + decrementor
+
+        return new_carry, y
+
+    init_val = (0, 100)
+
+    final_carry, stacked_ys = tc.backend.jaxy_scan(
+        f=body_fun,
+        init=init_val,
+        xs=tc.backend.arange(5),
+    )
+
+    expected_final_carry = (5, 95)
+    expected_stacked_ys = np.array([100, 100, 100, 100, 100])
+
+    assert final_carry == expected_final_carry
+
+    np.testing.assert_array_equal(np.asarray(stacked_ys), expected_stacked_ys)
+
+
+def test_backend_jv_grad(jaxb, highp):
+    def f(x):
+        return tc.backend.sum(tc.backend.special_jv(5, x, 100))
+
+    print(tc.backend.jit(tc.backend.value_and_grad(f))(0.2))
+
+
 @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
 def test_backend_scatter(backend):
     np.testing.assert_allclose(
tests/test_lattice.py CHANGED
@@ -23,6 +23,8 @@ from tensorcircuit.templates.lattice import (
     RectangularLattice,
     SquareLattice,
     TriangularLattice,
+    AbstractLattice,
+    get_compatible_layers,
 )
 
 
@@ -1664,3 +1666,85 @@ class TestDistanceMatrix:
     # "The specialized PBC implementation is significantly slower "
     # "than the general-purpose implementation."
     # )
+
+
+def _validate_layers(bonds, layers) -> None:
+    """
+    A helper function to validate the output of get_compatible_layers.
+    """
+    # Compare against the original bonds list passed in.
+    expected_edges = set(tuple(sorted(b)) for b in bonds)
+    actual_edges = set(tuple(sorted(edge)) for layer in layers for edge in layer)
+
+    assert expected_edges == actual_edges, (
+        "Completeness check failed: The set of all edges in the layers must "
+        "exactly match the input bonds."
+    )
+
+    for i, layer in enumerate(layers):
+        qubits_in_layer: set[int] = set()
+        for edge in layer:
+            q1, q2 = edge
+            assert (
+                q1 not in qubits_in_layer
+            ), f"Compatibility check failed: Qubit {q1} is reused in layer {i}."
+            qubits_in_layer.add(q1)
+            assert (
+                q2 not in qubits_in_layer
+            ), f"Compatibility check failed: Qubit {q2} is reused in layer {i}."
+            qubits_in_layer.add(q2)
+
+
+@pytest.mark.parametrize(
+    "lattice_instance",
+    [
+        SquareLattice(size=(3, 2), pbc=False),
+        SquareLattice(size=(3, 3), pbc=True),
+        HoneycombLattice(size=(2, 2), pbc=False),
+    ],
+    ids=[
+        "SquareLattice_3x2_OBC",
+        "SquareLattice_3x3_PBC",
+        "HoneycombLattice_2x2_OBC",
+    ],
+)
+def test_layering_on_various_lattices(lattice_instance: AbstractLattice):
+    """Tests gate layering for various standard lattice types."""
+    bonds = lattice_instance.get_neighbor_pairs(k=1, unique=True)
+    layers = get_compatible_layers(bonds)
+
+    assert len(layers) > 0, "Layers should not be empty for non-trivial lattices."
+    _validate_layers(bonds, layers)
+
+
+def test_layering_on_1d_chain_pbc():
+    """Test layering on a 1D chain with periodic boundaries (a cycle graph)."""
+    lattice_even = ChainLattice(size=(6,), pbc=True)
+    bonds_even = lattice_even.get_neighbor_pairs(k=1, unique=True)
+    layers_even = get_compatible_layers(bonds_even)
+    _validate_layers(bonds_even, layers_even)
+
+    lattice_odd = ChainLattice(size=(5,), pbc=True)
+    bonds_odd = lattice_odd.get_neighbor_pairs(k=1, unique=True)
+    layers_odd = get_compatible_layers(bonds_odd)
+    assert len(layers_odd) == 3, "A 5-site cycle graph should be 3-colorable."
+    _validate_layers(bonds_odd, layers_odd)
+
+
+def test_layering_on_custom_star_graph():
+    """Test layering on a custom lattice forming a star graph."""
+    star_edges = [(0, 1), (0, 2), (0, 3)]
+    layers = get_compatible_layers(star_edges)
+    assert len(layers) == 3, "A star graph S_4 requires 3 layers."
+    _validate_layers(star_edges, layers)
+
+
+def test_layering_on_edge_cases():
+    """Test various edge cases: empty, single-site, and no-edge lattices."""
+    layers_empty = get_compatible_layers([])
+    assert layers_empty == [], "Layers should be empty for an empty set of bonds."
+
+    single_edge = [(0, 1)]
+    layers_single = get_compatible_layers(single_edge)
+    assert layers_single == [[(0, 1)]]
+    _validate_layers(single_edge, layers_single)
tests/test_timeevol.py CHANGED
@@ -279,7 +279,6 @@ def test_krylov_evol_heisenberg_6_sites(backend):
 
     # Generate Heisenberg Hamiltonian
     h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=False)
-    print(h.dtype)
     # Initial state - all spins up except last one down
     psi0 = np.zeros((2**n,))
     psi0[62] = 1.0
@@ -452,3 +451,191 @@ def test_krylov_evol_gradient(backend):
     grad_fn = tc.backend.jit(tc.backend.grad(loss_function))
     gradient = grad_fn(t)
     print(gradient)
+
+
+@pytest.mark.parametrize(
+    "backend, sparse",
+    [[lf("npb"), True], [lf("npb"), False], [lf("jaxb"), True], [lf("jaxb"), False]],
+)
+def test_chebyshev_evol_basic(backend, highp, sparse):
+    n = 6
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+
+    # Generate Heisenberg Hamiltonian (dense or sparse per parametrization)
+    h = tc.quantum.heisenberg_hamiltonian(
+        g, hzz=1.0, hxx=1.0, hyy=1.0, hx=0.2, sparse=sparse
+    )
+
+    # Initial Neel state: |↑↓↑↓↑↓⟩
+    c = tc.Circuit(n)
+    c.x([1, 3, 5])  # Apply X gates to qubits 1, 3, and 5
+    psi0 = c.state()
+
+    # Evolution time
+    t = 2.0
+
+    # Estimate spectral bounds
+    e_max, e_min = tc.timeevol.estimate_spectral_bounds(h, n_iter=30)
+
+    # Estimate parameters
+    k = tc.timeevol.estimate_k(t, (e_max, e_min))
+    m = tc.timeevol.estimate_M(t, (e_max, e_min), k)
+
+    # Evolve using Chebyshev method
+    psi_chebyshev = tc.timeevol.chebyshev_evol(
+        h, psi0, t, (float(e_max) + 0.1, float(e_min) - 0.1), k, m
+    )
+
+    # Check that the state is normalized (or close to it)
+    norm = tc.backend.norm(psi_chebyshev)
+    np.testing.assert_allclose(norm, 1.0, atol=1e-3)
+
+    # Compare with exact evolution for a small system
+    if sparse is True:
+        h = tc.backend.to_dense(h)
+    psi_exact = tc.timeevol.ed_evol(h, psi0, 1.0j * tc.backend.convert_to_tensor([t]))[
+        0
+    ]
+
+    # States should be close (up to global phase)
+    fidelity = np.abs(np.vdot(np.asarray(psi_exact), np.asarray(psi_chebyshev))) ** 2
+    assert fidelity > 0.95  # Should be close, but not exact due to approximations
+
+
+def test_chebyshev_evol_vmap_on_t(jaxb, highp):
+    n = 4
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+
+    # Generate Heisenberg Hamiltonian
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=False)
+
+    # Initial Neel state
+    c = tc.Circuit(n)
+    c.x([1, 3])
+    psi0 = c.state()
+
+    # Estimate spectral bounds
+    e_max, e_min = tc.timeevol.estimate_spectral_bounds(h, n_iter=20)
+
+    # Fixed parameters
+    k = 50
+    m = 150
+
+    # Define vectorized evolution function
+    def evolve_single_time(t):
+        return tc.timeevol.chebyshev_evol(
+            h, psi0, t, (float(e_max) + 0.1, float(e_min) - 0.1), k, m
+        )
+
+    # Vectorize over times
+    times = tc.backend.convert_to_tensor([0.5, 1.0, 1.5])
+    vmap_evolve = tc.backend.jit(tc.backend.vmap(evolve_single_time))
+    states_vmap = vmap_evolve(times)
+
+    # Check output shape
+    assert states_vmap.shape == (3, 2**n)
+
+    # Compare with sequential execution
+    states_sequential = []
+    for t in times:
+        state = tc.timeevol.chebyshev_evol(
+            h, psi0, float(t), (e_max + 0.1, e_min - 0.1), k, m
+        )
+        states_sequential.append(state)
+
+    states_sequential = tc.backend.stack(states_sequential)
+
+    # Results should be the same
+    np.testing.assert_allclose(states_vmap, states_sequential, atol=1e-5)
+
+
+def test_chebyshev_evol_jit_on_psi(jaxb, highp):
+    """Test JIT compilation capability of chebyshev_evol on the psi parameter"""
+    n = 4
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=False)
+
+    # Generate Heisenberg Hamiltonian
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=0.6, hyy=1.0, sparse=True)
+
+    # Estimate spectral bounds
+    e_max, e_min = tc.timeevol.estimate_spectral_bounds(h, n_iter=20)
+
+    # Fixed parameters
+    t = 1.0
+    k = 50
+    m = 150
+
+    # Define JIT-compiled evolution function with psi as argument
+    def evolve_state(psi):
+        return tc.timeevol.chebyshev_evol(
+            h, psi, t, (float(e_max) + 0.1, float(e_min) - 0.1), k, m
+        )
+
+    jit_evolve = tc.backend.jit(evolve_state)
+
+    # Test with different initial states
+    c1 = tc.Circuit(n)
+    c1.x([0, 2])
+    psi1 = c1.state()
+
+    c2 = tc.Circuit(n)
+    c2.h(0)
+    for i in range(n - 1):
+        c2.cnot(i, i + 1)
+    psi2 = c2.state()
+
+    # Run JIT-compiled evolution
+    result1_jit = jit_evolve(psi1)
+    result2_jit = jit_evolve(psi2)
+
+    # Run regular evolution for comparison
+    result1_regular = tc.timeevol.chebyshev_evol(
+        h, psi1, t, (e_max + 0.1, e_min - 0.1), k, m
+    )
+    result2_regular = tc.timeevol.chebyshev_evol(
+        h, psi2, t, (e_max + 0.1, e_min - 0.1), k, m
+    )
+    print(result1_jit)
+    # Results should be the same
+    np.testing.assert_allclose(result1_jit, result1_regular, atol=1e-5)
+    np.testing.assert_allclose(result2_jit, result2_regular, atol=1e-5)
+
+
+def test_chebyshev_evol_ad_on_t(jaxb, highp):
+    n = 5
+    # Create a 1D chain graph
+    g = tc.templates.graphs.Line1D(n, pbc=True)
+
+    # Generate Heisenberg Hamiltonian
+    h = tc.quantum.heisenberg_hamiltonian(g, hzz=1.0, hxx=1.0, hyy=1.0, sparse=True)
+
+    # Initial state
+    c = tc.Circuit(n)
+    c.x([1, 3])
+    psi0 = c.state()
+
+    # Estimate spectral bounds
+    e_max, e_min = tc.timeevol.estimate_spectral_bounds(h, n_iter=20)
+
+    # Fixed parameters
+    k = 50
+    m = 100
+
+    # Define loss function for gradient computation
+    def loss_function(t):
+        psi_t = tc.timeevol.chebyshev_evol(
+            h, psi0, t, (float(e_max) + 0.1, float(e_min) - 0.1), k, m
+        )
+        c = tc.Circuit(5, inputs=psi_t)
+        return tc.backend.real(c.expectation_ps(z=[2]))
+
+    # Compute gradient
+    grad_fn = tc.backend.jit(tc.backend.grad(loss_function))
+    t_test = tc.backend.convert_to_tensor(1.0)
+    gradient = grad_fn(t_test)
+    print(gradient)
+    # The gradient should be a scalar
+    assert gradient.shape == ()