tensorcircuit-nightly 1.4.0.dev20250915__py3-none-any.whl → 1.4.0.dev20250917__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

tensorcircuit/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "1.4.0.dev20250915"
+ __version__ = "1.4.0.dev20250917"
  __author__ = "TensorCircuit Authors"
  __creator__ = "refraction-ray"
tensorcircuit/analogcircuit.py CHANGED
@@ -1,5 +1,6 @@
  """
  Analog-Digital Hybrid Circuit class wrapper
+ only support jax backend
  """

  from typing import Any, List, Optional, Callable, Dict, Tuple, Union, Sequence
@@ -21,7 +22,9 @@ Tensor = Any

  @dataclass
  class AnalogBlock:
- """A data structure to hold information about an analog evolution block."""
+ """
+ A data structure to hold information about an analog evolution block.
+ """

  hamiltonian_func: Callable[[Tensor], Tensor]
  time: float
@@ -45,7 +48,18 @@ class AnalogCircuit:
  """
  Initializes the hybrid circuit.

- :param num_qubits: The number of qubits in the circuit.
+ :param nqubits: The number of qubits in the circuit.
+ :type nqubits: int
+ :param dim: The local Hilbert space dimension per site. Qudit is supported for 2 <= d <= 36.
+ :type dim: If None, the dimension of the circuit will be `2`, which is a qubit system.
+ :param inputs: If not None, the initial state of the circuit is taken as ``inputs``
+ instead of :math:`\vert 0 \rangle^n` qubits, defaults to None.
+ :type inputs: Optional[Tensor], optional
+ :param mps_inputs: QuVector for a MPS like initial wavefunction.
+ :type mps_inputs: Optional[QuOperator]
+ :param split: dict if two qubit gate is ready for split, including parameters for at least one of
+ ``max_singular_values`` and ``max_truncation_err``.
+ :type split: Optional[Dict[str, Any]]
  """
  self.num_qubits, self._nqubits = nqubits, nqubits
  self.dim = 2**self.num_qubits
@@ -57,7 +71,7 @@ class AnalogCircuit:

  # List of digital circuits, starting with one empty circuit.
  self.digital_circuits: List[Circuit] = [
- Circuit(self.num_qubits, self.inputs, mps_inputs, split, dim)
+ Circuit(self.num_qubits, inputs, mps_inputs, split, dim)
  ]

  # List of analog blocks, each containing the Hamiltonian function, time, and solver options.
@@ -66,18 +80,25 @@ class AnalogCircuit:
  self._solver_options: Dict[str, Any] = {}

  def set_solver_options(self, **kws: Any) -> None:
+ """
+ set solver options globally for this circuit object
+ """
  self._solver_options = kws

  @property
  def effective_circuit(self) -> Circuit:
- """Returns the effective circuit after all blocks have been added."""
+ """
+ Returns the effective circuit after all blocks have been added.
+ """
  if self._effective_circuit is None:
  self.state()
  return self._effective_circuit # type: ignore

  @property
  def current_digital_circuit(self) -> Circuit:
- """Returns the last (currently active) digital circuit."""
+ """
+ Returns the last (currently active) digital circuit.
+ """
  return self.digital_circuits[-1]

  def add_analog_block(
@@ -94,11 +115,14 @@ class AnalogCircuit:

  :param hamiltonian_func: A function H(t) that takes a time `t` (from 0 to `time`)
  and returns the Hamiltonian matrix at that instant.
+ :type hamiltonian_func: Callable[[float], np.ndarray]
  :param time: The total evolution time 'T'.
+ :type time: float
  :param index: The indices of the qubits to apply the analog evolution to. Defaults None for
  global application.
- :param solver_options: Keyword arguments passed directly to `scipy.integrate.solve_ivp`.
- (e.g., method='RK45', rtol=1e-6, atol=1e-8)
+ :type index: Optional[List[int]]
+ :param solver_options: Keyword arguments passed directly to `tc.timeevol.ode_evolve`
+ :type solver_options: Dict[str, Any]
  """
  # Create and store the analog block information
  time = backend.convert_to_tensor(time, dtype=rdtypestr)
@@ -152,14 +176,12 @@ class AnalogCircuit:
  :return: The final state vector after the full evolution
  :rtype: Tensor
  """
- psi = self.inputs
-
  # Propagate the state through the alternating circuit blocks
  for i, analog_block in enumerate(self.analog_blocks):
  # 1. Apply Digital Block i
  digital_c = self.digital_circuits[i]
  if i > 0:
- digital_c.replace_inputs(psi)
+ digital_c.replace_inputs(psi) # type: ignore
  psi = digital_c.wavefunction()

  if analog_block.index is None:
@@ -181,8 +203,11 @@ class AnalogCircuit:
  # TODO(@refraction-ray): support more time evol methods

  # 3. Apply the final digital circuit
- self.digital_circuits[-1].replace_inputs(psi)
- psi = self.digital_circuits[-1].wavefunction()
+ if self.analog_blocks:
+ self.digital_circuits[-1].replace_inputs(psi)
+ psi = self.digital_circuits[-1].wavefunction()
+ else:
+ psi = self.digital_circuits[-1].wavefunction()
  self._effective_circuit = Circuit(self.num_qubits, inputs=psi)

  return psi
@@ -382,7 +407,7 @@ class AnalogCircuit:
  if i < len(self.analog_blocks):
  block = self.analog_blocks[i]
  s += f"--- Analog Block {i} (T={block.time}) ---\n"
- s += f" H(t) function: '{block.hamiltonian_func.__name__}'\n"
+ s += f" H(t) function: '{block.hamiltonian_func.__name__}'\n"

  s += "=" * 40
  return s
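For orientation, here is a minimal usage sketch of the `AnalogCircuit` API touched by the hunks above (constructor, `add_analog_block`, `state`). It is not part of the diff; it assumes the jax backend (as the new module docstring requires), that `AnalogCircuit` is importable from `tensorcircuit.analogcircuit`, and a simple constant Hamiltonian.

```python
# Minimal sketch of the AnalogCircuit API changed above; illustrative only.
import numpy as np
import tensorcircuit as tc
from tensorcircuit.analogcircuit import AnalogCircuit

tc.set_backend("jax")

nqubits = 2
x_mat = np.array([[0.0, 1.0], [1.0, 0.0]])
h_mat = np.kron(x_mat, np.eye(2)).astype(np.complex64)  # X on qubit 0, identity on qubit 1

def h_func(t):
    # H(t): receives the current time t and returns the Hamiltonian matrix at that instant
    return tc.backend.convert_to_tensor(h_mat)

ac = AnalogCircuit(nqubits)
ac.add_analog_block(h_func, time=1.0)  # index=None means global evolution
psi = ac.state()                       # final state after all digital/analog blocks
```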
tensorcircuit/backends/jax_backend.py CHANGED
@@ -175,6 +175,27 @@ def _eigh_jax(self: Any, tensor: Tensor) -> Tensor:
  return adaware_eigh(tensor)


+ def bcsr_scalar_mul(self: Tensor, other: Tensor) -> Tensor:
+ """
+ Implements scalar multiplication for BCSR matrices (self * scalar).
+ """
+ import jax.numpy as jnp
+ from jax.experimental.sparse import BCSR
+
+ if jnp.isscalar(other):
+ # The core logic: only the data array is affected by scalar multiplication.
+ # The sparsity pattern (indices, indptr) remains the same.
+ new_data = self.data * other
+
+ # Return a new BCSR instance with the scaled data.
+ return BCSR((new_data, self.indices, self.indptr), shape=self.shape)
+
+ # For any other type of multiplication (e.g., element-wise with another matrix),
+ # return NotImplemented. This allows Python to try other operations,
+ # like other.__rmul__(self).
+ return NotImplemented
+
+
  tensornetwork.backends.jax.jax_backend.JaxBackend.convert_to_tensor = (
  _convert_to_tensor_jax
  )
@@ -224,6 +245,11 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend): # type: ignore

  self.name = "jax"

+ # --- Monkey-patch the BCSR class ---
+
+ sparse.BCSR.__mul__ = bcsr_scalar_mul # type: ignore
+ sparse.BCSR.__rmul__ = bcsr_scalar_mul # type: ignore
+
  # it is already child of numpy backend, and self.np = self.jax.np
  def eye(
  self, N: int, dtype: Optional[str] = None, M: Optional[int] = None
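A quick sketch of what the monkey-patch above enables: once the jax backend is selected, scalar multiplication on `jax.experimental.sparse.BCSR` matrices is handled by `bcsr_scalar_mul`, scaling only the data array while keeping the sparsity pattern. The example below is illustrative and assumes jax is installed.

```python
# Illustration (not from the diff): scalar * BCSR after the patch installed in JaxBackend.__init__.
import jax.numpy as jnp
from jax.experimental.sparse import BCSR
import tensorcircuit as tc

tc.set_backend("jax")  # selecting the backend applies the BCSR.__mul__/__rmul__ patch

m = BCSR.fromdense(jnp.array([[1.0, 0.0], [0.0, 2.0]]))
scaled = 0.5 * m          # float.__mul__ returns NotImplemented, so the patched __rmul__ runs
print(scaled.todense())   # [[0.5 0. ] [0.  1. ]]
```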
tensorcircuit/backends/pytorch_backend.py CHANGED
@@ -9,6 +9,7 @@ from typing import Any, Callable, Optional, Sequence, Tuple, Union
  from operator import mul
  from functools import reduce, partial

+ from scipy.sparse import coo_matrix
  import tensornetwork
  from tensornetwork.backends.pytorch import pytorch_backend
  from .abstract_backend import ExtendedBackend
@@ -23,7 +24,6 @@ logger = logging.getLogger(__name__)

  # TODO(@refraction-ray): lack stateful random methods implementation for now
  # TODO(@refraction-ray): lack scatter impl for now
- # TODO(@refraction-ray): lack sparse relevant methods for now
  # To be added once pytorch backend is ready

@@ -302,6 +302,9 @@ class PyTorchBackend(pytorch_backend.PyTorchBackend, ExtendedBackend): # type:
  return torchlib.kron(a, b)

  def numpy(self, a: Tensor) -> Tensor:
+ if self.is_sparse(a):
+ a = a.coalesce()
+ return coo_matrix((a.values().numpy(), a.indices().numpy()), shape=a.shape)
  a = a.cpu()
  if a.is_conj():
  return a.resolve_conj().numpy()
@@ -381,6 +384,9 @@ class PyTorchBackend(pytorch_backend.PyTorchBackend, ExtendedBackend): # type:
  def sort(self, a: Tensor, axis: int = -1) -> Tensor:
  return torchlib.sort(a, dim=axis).values

+ def argsort(self, a: Tensor, axis: int = -1) -> Tensor:
+ return torchlib.argsort(a, dim=axis)
+
  def all(self, tensor: Tensor, axis: Optional[Sequence[int]] = None) -> Tensor:
  """
  Corresponds to torch.all.
@@ -467,6 +473,39 @@ class PyTorchBackend(pytorch_backend.PyTorchBackend, ExtendedBackend): # type:
  def reverse(self, a: Tensor) -> Tensor:
  return torchlib.flip(a, dims=(-1,))

+ def coo_sparse_matrix(
+ self, indices: Tensor, values: Tensor, shape: Tensor
+ ) -> Tensor:
+ # Convert COO format to PyTorch sparse tensor
+ indices = self.convert_to_tensor(indices)
+ return torchlib.sparse_coo_tensor(self.transpose(indices), values, shape)
+
+ def sparse_dense_matmul(
+ self,
+ sp_a: Tensor,
+ b: Tensor,
+ ) -> Tensor:
+ # Matrix multiplication between sparse and dense tensor
+ return torchlib.sparse.mm(sp_a, b)
+
+ def sparse_csr_from_coo(self, coo: Tensor, strict: bool = False) -> Tensor:
+ try:
+ # Convert COO to CSR format if supported
+ return coo.to_sparse_csr()
+ except AttributeError as e:
+ if not strict:
+ return coo
+ else:
+ raise e
+
+ def to_dense(self, sp_a: Tensor) -> Tensor:
+ # Convert sparse tensor to dense
+ return sp_a.to_dense()
+
+ def is_sparse(self, a: Tensor) -> bool:
+ # Check if tensor is sparse
+ return a.is_sparse or a.is_sparse_csr # type: ignore
+
  def tree_map(self, f: Callable[..., Any], *pytrees: Any) -> Any:
  # torch native tree_map not support multiple pytree args
  # return torchlib.utils._pytree.tree_map(f, *pytrees)
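With these additions the PyTorch backend gains the sparse helpers already present on the other backends. The sketch below exercises the new methods through the backend object; it assumes torch is installed and that `tc.set_backend("pytorch")` returns the backend instance, and is only illustrative.

```python
# Illustration (not from the diff) of the new PyTorch-backend sparse helpers.
import numpy as np
import tensorcircuit as tc

K = tc.set_backend("pytorch")

indices = np.array([[0, 0], [1, 1]])                     # one (row, col) pair per nonzero
values = np.array([1.0, 2.0], dtype=np.float32)
sp = K.coo_sparse_matrix(indices, K.convert_to_tensor(values), shape=(2, 2))

b = K.convert_to_tensor(np.ones((2, 1), dtype=np.float32))
out = K.sparse_dense_matmul(sp, b)   # torch.sparse.mm under the hood

print(K.is_sparse(sp))               # True
print(K.to_dense(sp))                # dense 2x2 tensor
print(K.numpy(sp))                   # now returns a scipy.sparse.coo_matrix for sparse inputs
```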
tensorcircuit/circuit.py CHANGED
@@ -475,7 +475,7 @@ class Circuit(BaseCircuit):
  raise ValueError("no `get_gate_from_index` implementation is provided")
  g = get_gate_from_index(r, kraus)
  g = backend.reshape(g, [self._d for _ in range(sites * 2)])
- self.any(*index, unitary=g, name=name) # type: ignore
+ self.any(*index, unitary=g, name=name, dim=self._d) # type: ignore
  return r

  def _general_kraus_tf(
@@ -600,9 +600,13 @@ class Circuit(BaseCircuit):
  for w, k in zip(prob, kraus_tensor)
  ]
  pick = self.unitary_kraus(
- new_kraus, *index, prob=prob, status=status, name=name
+ new_kraus,
+ *index,
+ prob=prob,
+ status=status,
+ name=name,
  )
- if with_prob is False:
+ if not with_prob:
  return pick
  else:
  return pick, prob
@@ -633,7 +637,11 @@ class Circuit(BaseCircuit):
  :type status: Optional[float], optional
  """
  return self._general_kraus_2(
- kraus, *index, status=status, with_prob=with_prob, name=name
+ kraus,
+ *index,
+ status=status,
+ with_prob=with_prob,
+ name=name,
  )

  apply_general_kraus = general_kraus
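As a reminder of how the reformatted `general_kraus` entry point is used, here is a small Monte Carlo trajectory sketch with amplitude-damping Kraus operators. The operator matrices and parameter values are illustrative assumptions, not taken from the diff.

```python
# Illustration (not from the diff): one Monte Carlo trajectory of an amplitude-damping channel.
import numpy as np
import tensorcircuit as tc

gamma = 0.1
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1.0 - gamma)]], dtype=np.complex64)
k1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]], dtype=np.complex64)

c = tc.Circuit(1)
c.x(0)
# status fixes the random branch selection; omit it to draw the branch automatically
pick = c.general_kraus([tc.gates.Gate(k0), tc.gates.Gate(k1)], 0, status=0.2)
print(pick, c.state())
```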
tensorcircuit/quditcircuit.py CHANGED
@@ -144,7 +144,7 @@ class QuditCircuit:
  else:
  raise ValueError(f"Unsupported gate/arity: {name} on {len(indices)} qudits")

- def any(self, *indices: int, unitary: Tensor, name: str = "any") -> None:
+ def any(self, *indices: int, unitary: Tensor, name: Optional[str] = None) -> None:
  """
  Apply an arbitrary unitary on one or two qudits.

@@ -155,6 +155,7 @@ class QuditCircuit:
  :param name: Optional label stored in the circuit history.
  :type name: str
  """
+ name = "any" if name is None else name
  self._circ.unitary(*indices, unitary=unitary, name=name, dim=self._d) # type: ignore

  unitary = any
@@ -668,3 +669,65 @@ class QuditCircuit:
  :rtype: List[Gate]
  """
  return self._circ.amplitude_before(l)
+
+ def general_kraus(
+ self,
+ kraus: Sequence[Gate],
+ *index: int,
+ status: Optional[float] = None,
+ with_prob: bool = False,
+ name: Optional[str] = None,
+ ) -> Tensor:
+ """
+ Monte Carlo trajectory simulation of general Kraus channel whose Kraus operators cannot be
+ amplified to unitary operators. For unitary operators composed Kraus channel, :py:meth:`unitary_kraus`
+ is much faster.
+
+ This function is jittable in theory. But only jax+GPU combination is recommended for jit
+ since the graph building time is too long for other backend options; though the running
+ time of the function is very fast for every case.
+
+ :param kraus: A list of ``tn.Node`` for Kraus operators.
+ :type kraus: Sequence[Gate]
+ :param index: The qubits index that Kraus channel is applied on.
+ :type index: int
+ :param status: Random tensor uniformly between 0 or 1, defaults to be None,
+ when the random number will be generated automatically
+ :type status: Optional[float], optional
+ """
+ return self._circ.general_kraus(
+ kraus,
+ *index,
+ status=status,
+ with_prob=with_prob,
+ name=name,
+ )
+
+ def unitary_kraus(
+ self,
+ kraus: Sequence[Gate],
+ *index: int,
+ prob: Optional[Sequence[float]] = None,
+ status: Optional[float] = None,
+ name: Optional[str] = None,
+ ) -> Tensor:
+ """
+ Apply unitary gates in ``kraus`` randomly based on corresponding ``prob``.
+ If ``prob`` is ``None``, this is reduced to kraus channel language.
+
+ :param kraus: List of ``tc.gates.Gate`` or just Tensors
+ :type kraus: Sequence[Gate]
+ :param prob: prob list with the same size as ``kraus``, defaults to None
+ :type prob: Optional[Sequence[float]], optional
+ :param status: random seed between 0 to 1, defaults to None
+ :type status: Optional[float], optional
+ :return: shape [] int dtype tensor indicates which kraus gate is actually applied
+ :rtype: Tensor
+ """
+ return self._circ.unitary_kraus(
+ kraus,
+ *index,
+ prob=prob,
+ status=status,
+ name=name,
+ )
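The two new `QuditCircuit` methods simply delegate to the underlying `Circuit`, so they are used the same way as on qubits. Below is a small, assumed qutrit example for `unitary_kraus`; the `(nqudits, dim=...)` constructor form and the shift matrix are illustrative assumptions, not taken from the diff.

```python
# Illustration (not from the diff): unitary_kraus on a qutrit circuit.
import numpy as np
from tensorcircuit.quditcircuit import QuditCircuit

d = 3
eye3 = np.eye(d, dtype=np.complex64)
shift = np.roll(eye3, 1, axis=0)   # cyclic permutation, a qutrit analogue of X

c = QuditCircuit(2, dim=d)         # assuming a (nqudits, dim=...) constructor
which = c.unitary_kraus([eye3, shift], 0, prob=[0.9, 0.1], status=0.95)
print(which)                       # index of the branch actually applied
```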
tensorcircuit/timeevol.py CHANGED
@@ -616,7 +616,7 @@ def ode_evol_global(

  def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
  h = -1.0j * hamiltonian(t, *args)
- return backend.sparse_dense_matmul(h, y)
+ return h @ y

  s1 = _solve_ode(f, initial_state, times, args, solver_kws)

tensorcircuit_nightly-1.4.0.dev20250917.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tensorcircuit-nightly
- Version: 1.4.0.dev20250915
+ Version: 1.4.0.dev20250917
  Summary: High performance unified quantum computing framework for the NISQ era
  Author-email: TensorCircuit Authors <znfesnpbh@gmail.com>
  License-Expression: Apache-2.0
@@ -62,9 +62,9 @@ Dynamic: license-file

  TensorCircuit-NG is the next-generation open-source high-performance quantum software framework, built upon tensornetwork engines, supporting for automatic differentiation, just-in-time compiling, hardware acceleration, vectorized parallelism and distributed training, providing unified infrastructures and interfaces for quantum programming. It can compose quantum circuits, neural networks and tensor networks seamlessly with high simulation efficiency and flexibility.

- TensorCircuit-NG is built on top of modern machine learning frameworks: Jax, TensorFlow, and PyTorch. It is specifically suitable for large-scale simulations of quantum-classical hybrid paradigm and variational quantum algorithms in ideal, noisy, Clifford, qudit, approximate, analog, and fermionic cases. It also supports quantum hardware access and provides CPU/GPU/QPU hybrid deployment solutions.
+ TensorCircuit-NG is built on top of modern machine learning frameworks: Jax, TensorFlow, and PyTorch. It is specifically suitable for large-scale simulations of quantum-classical hybrid paradigm and variational quantum algorithms in ideal (`Circuit`), noisy (`DMCircuit`), Clifford (`StabilizerCircuit`), qudit (`QuditCircuit`), approximate (`MPSCircuit`), analog (`AnalogCircuit`), and fermionic (`FGSCircuit`) cases. It also supports quantum hardware access and provides CPU/GPU/QPU hybrid deployment solutions.

- TensorCircuit-NG is the actively maintained official version and a [fully compatible](https://tensorcircuit-ng.readthedocs.io/en/latest/faq.html#what-is-the-relation-between-tensorcircuit-and-tensorcircuit-ng) successor to TensorCircuit with more new features (stabilizer circuit, multi-card distributed simulation, etc.) and bug fixes (support latest `numpy>2` and `qiskit>1`).
+ TensorCircuit-NG is the only actively maintained official version and a [fully compatible](https://tensorcircuit-ng.readthedocs.io/en/latest/faq.html#what-is-the-relation-between-tensorcircuit-and-tensorcircuit-ng) successor to TensorCircuit with more new features (stabilizer circuit, qudit circuit, analog circuit, multi-GPU distributed simulation, etc.) and bug fixes (support latest `numpy>2` and `qiskit>1`).

  ## Getting Started

tensorcircuit_nightly-1.4.0.dev20250917.dist-info/RECORD CHANGED
@@ -1,11 +1,11 @@
- tensorcircuit/__init__.py,sha256=xGBMPFkh2rhkka6tb67P4w_moRr52axSOur3x2yM_zY,2160
+ tensorcircuit/__init__.py,sha256=KUUkDUVruLM_vxiun9uZ-8WLTRNf0zjchcyqLemu5mo,2160
  tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
  tensorcircuit/abstractcircuit.py,sha256=DwBmXhejVEqyhwoDJn8nOswJQBmvDR28n276wlhesJY,44224
- tensorcircuit/analogcircuit.py,sha256=ZelahNZ0tcIeC3nPuY-0SlSOOxrbF9XWWS3El462oE0,14522
+ tensorcircuit/analogcircuit.py,sha256=4BzIC631MZ2m05CXuk2T6HQ8RTmHBE6NszaOLuxmlEc,15639
  tensorcircuit/asciiart.py,sha256=neY1OWFwtoW5cHPNwkQHgRPktDniQvdlP9QKHkk52fM,8236
  tensorcircuit/basecircuit.py,sha256=9I0Es2P5VdGisx5_t0AKSYtgSb15RB6fXCZg4eEr5es,39138
  tensorcircuit/channels.py,sha256=CFQxWI-JmkIxexslCBdjp_RSxUbHs6eAJv4LvlXXXCY,28637
- tensorcircuit/circuit.py,sha256=wF3UKCoUOuQG-S10JGX7U1nGwflcML1D8fT94WcGfmg,39921
+ tensorcircuit/circuit.py,sha256=lETz1SvUh_60ZMFtvSPMWOF6zWMMyQU4TyB_VwhkVHM,40027
  tensorcircuit/cons.py,sha256=V0wjevtDkESCIWMJaysgPVorQlPAIT0vtRWvIZkEWcE,33065
  tensorcircuit/densitymatrix.py,sha256=C8Q2fHXZ78S9ZaPqCIKl6_v_sILqbBgqBOUYUQ1QmFI,15020
  tensorcircuit/experimental.py,sha256=TGK4FaS6TS_ZhtjcIZgYVuAkGdRW50LN0DdXp-h4bos,29906
@@ -16,12 +16,12 @@ tensorcircuit/mps_base.py,sha256=UZ-v8vsr_rAsKrfun8prVgbXJ-qsdqKy2DZIHpq3sxo,154
  tensorcircuit/mpscircuit.py,sha256=CPWlsb-kybZE-lh4iUkVMDn45qhHtFHUnxATP6TsaVk,38802
  tensorcircuit/noisemodel.py,sha256=vzxpoYEZbHVC4a6g7_Jk4dxsHi4wvhpRFwud8b616Qo,11878
  tensorcircuit/quantum.py,sha256=asuA3rCfi2Y4knWz1ObkveCdSv8EeaSsf1xfPVowvT0,110628
- tensorcircuit/quditcircuit.py,sha256=6KdHPRNzeBO8TxhXNNYnfN7EouTy_rX9Vu01gJM8yD0,23724
+ tensorcircuit/quditcircuit.py,sha256=Ll1Nb0tQYKzq7rlPJA64GjcyBqTSydvCBBKlbhEb38A,26122
  tensorcircuit/quditgates.py,sha256=PR5n9NLNhMPyoanFYjuDioW-0U7VGUiJf_OvxR_Twq0,20925
  tensorcircuit/shadows.py,sha256=KQM19KnXnn6d3HgaqdRs33RWC2uCIiY5cEGnH1CVdGw,17012
  tensorcircuit/simplify.py,sha256=EuEyQenFit-hgQhEJecL7t7jJ8m8zQ4KuL_sEvPNu-I,9488
  tensorcircuit/stabilizercircuit.py,sha256=KbrBVSo2pXnf5JHIrxwRPSPTm7bJVMIcyE4d7-dIfCM,15545
- tensorcircuit/timeevol.py,sha256=bMZXPnCC4Q470f-vWeBz62yM11QcXaxAGOYUVZlYNxA,31921
+ tensorcircuit/timeevol.py,sha256=Er3rMFEX61G1Zvt-iNVMpw1IIJ1lwD5HZURpowvCfR4,31893
  tensorcircuit/torchnn.py,sha256=z_QpM0QC3mydGyWpyp877j-tSFCPyzynCwqrTWaw-IA,4637
  tensorcircuit/translation.py,sha256=VnU7DnYmbk1cWjqa7N68WNLNDn3DwENrMzmbG4_CQco,28611
  tensorcircuit/utils.py,sha256=nEDR1wTh1WF_yV6UyZYlifqOPWdKk_Krr4HjhrWHnGQ,7228
@@ -46,10 +46,10 @@ tensorcircuit/backends/__init__.py,sha256=WiUmbUFzM29w3hKfhuKxVUk3PpqDFiXf4za9g0
  tensorcircuit/backends/abstract_backend.py,sha256=ox8gWb1ui21DPA8bnLYEg7HOp0NwWFLAhYBjQZj8p2o,70288
  tensorcircuit/backends/backend_factory.py,sha256=Z0aQ-RnxOnQzp-SRw8sefAH8XyBSlj2NXZwOlHinbfY,1713
  tensorcircuit/backends/cupy_backend.py,sha256=KG5fqP29wnngkPsi-TnOk0pHsr9lyD7hx6_Y56fCQuY,15172
- tensorcircuit/backends/jax_backend.py,sha256=H43ofYIgdGpz3ubOIIAhbqMGHs6m12NXI3iJc_z3CZ0,28112
+ tensorcircuit/backends/jax_backend.py,sha256=luLhZ7zyj8d6ARYxzGsvhxZnbownbqgeUMpUQw6F5Yw,29080
  tensorcircuit/backends/jax_ops.py,sha256=WyUGavch2R9uEFsI1Ap7eP1UcU4s2TItBgGsrVS3Hzs,9320
  tensorcircuit/backends/numpy_backend.py,sha256=0N7Z6slwDsAkWBislzsy0YhKTxa2Woq_xaCCX_SFuHI,15613
- tensorcircuit/backends/pytorch_backend.py,sha256=ixHFpSJhPw0gJS5pEprmjDkYNLanqtcENqJAHRJyDVM,25686
+ tensorcircuit/backends/pytorch_backend.py,sha256=XUSPh_AMbB1BeX2h9HHpccNubrLg85ouotjoXDc7N7g,26996
  tensorcircuit/backends/pytorch_ops.py,sha256=lLxpK6OqfpVwifyFlgsqhpnt-oIn4R5paPMVg51WaW0,3826
  tensorcircuit/backends/tensorflow_backend.py,sha256=9SAfcWEoKvyJG4sM0I89ozW16aa3VMxMfcOUeDljShE,39813
  tensorcircuit/backends/tf_ops.py,sha256=FJwDU7LhZrt0VUIx12DJU0gZnWhMv7B7r9sAKG710As,3378
@@ -89,8 +89,8 @@ tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA
  tensorcircuit/templates/hamiltonians.py,sha256=Guvqqi-V47w8xeZDmca4_mU4mW9V4c3AplsBOrRtxFo,6308
  tensorcircuit/templates/lattice.py,sha256=IvFyNgsFMfj82g-tpJraI3lMbI-EIZ0Cghq9v7tZ6Wg,72851
  tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
- tensorcircuit_nightly-1.4.0.dev20250915.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- tensorcircuit_nightly-1.4.0.dev20250915.dist-info/METADATA,sha256=6JMh1OMAJjTx04UQkSB8DOrdWvz9plnW_FYzlqZ0V4I,38135
- tensorcircuit_nightly-1.4.0.dev20250915.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tensorcircuit_nightly-1.4.0.dev20250915.dist-info/top_level.txt,sha256=9dcuK5488dWpVauYz8cdvx743z_La1h7zIQCsEEgu7o,14
- tensorcircuit_nightly-1.4.0.dev20250915.dist-info/RECORD,,
+ tensorcircuit_nightly-1.4.0.dev20250917.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ tensorcircuit_nightly-1.4.0.dev20250917.dist-info/METADATA,sha256=3U-yDgQyIi8Am3mOErKb4ip5Ghr4PNOLhetQabOu0Ds,38283
+ tensorcircuit_nightly-1.4.0.dev20250917.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tensorcircuit_nightly-1.4.0.dev20250917.dist-info/top_level.txt,sha256=9dcuK5488dWpVauYz8cdvx743z_La1h7zIQCsEEgu7o,14
+ tensorcircuit_nightly-1.4.0.dev20250917.dist-info/RECORD,,