tensorcircuit-nightly 1.3.0.dev20250827__py3-none-any.whl → 1.3.0.dev20250829__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tensorcircuit/__init__.py +1 -1
- tensorcircuit/backends/abstract_backend.py +74 -0
- tensorcircuit/backends/jax_backend.py +6 -0
- tensorcircuit/backends/numpy_backend.py +6 -0
- tensorcircuit/backends/pytorch_backend.py +6 -0
- tensorcircuit/backends/tensorflow_backend.py +8 -0
- tensorcircuit/timeevol.py +147 -73
- {tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/METADATA +27 -5
- {tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/RECORD +12 -12
- {tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/WHEEL +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/licenses/LICENSE +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/top_level.txt +0 -0
tensorcircuit/__init__.py
CHANGED

tensorcircuit/backends/abstract_backend.py
CHANGED

@@ -9,6 +9,7 @@ from functools import reduce, partial
 from operator import mul
 from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
 
+import math
 import numpy as np
 from ..utils import return_partial
 
@@ -405,6 +406,31 @@ class ExtendedBackend:
         a = self.reshape(a, [2 for _ in range(nleg)])
         return a
 
+    def reshaped(self: Any, a: Tensor, d: int) -> Tensor:
+        """
+        Reshape a tensor to the [d, d, ...] shape.
+
+        :param a: Input tensor
+        :type a: Tensor
+        :param d: edge length for each dimension
+        :type d: int
+        :return: the reshaped tensor
+        :rtype: Tensor
+        """
+        if not isinstance(d, int) or d <= 0:
+            raise ValueError("d must be a positive integer.")
+
+        size = self.sizen(a)
+        if size == 0:
+            return self.reshape(a, (0,))
+
+        nleg_float = math.log(size, d)
+        nleg = int(round(nleg_float))
+        if d**nleg != size:
+            raise ValueError(f"cannot reshape: size {size} is not a power of d={d}")
+
+        return self.reshape(a, (d,) * nleg)
+
     def reshapem(self: Any, a: Tensor) -> Tensor:
         """
         Reshape a tensor to the [l, l] shape.
@@ -839,6 +865,54 @@ class ExtendedBackend:
             "Backend '{}' has not implemented `mod`.".format(self.name)
         )
 
+    def floor(self: Any, x: Tensor) -> Tensor:
+        """
+        Compute the element-wise floor of the input tensor.
+
+        This operation returns a new tensor with the largest integers
+        less than or equal to each element of the input tensor,
+        i.e. it rounds each value down towards negative infinity.
+
+        :param x: Input tensor containing numeric values.
+        :type x: Tensor
+        :return: A tensor with the same shape as `x`, where each element
+            is the floored value of the corresponding element in `x`.
+        :rtype: Tensor
+
+        :raises NotImplementedError: If the backend does not provide an
+            implementation for `floor`.
+        """
+        raise NotImplementedError(
+            "Backend '{}' has not implemented `floor`.".format(self.name)
+        )
+
+    def clip(self: Any, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor:
+        """
+        Clip (limit) the values of a tensor element-wise to the range [a_min, a_max].
+
+        Each element in the input tensor `a` is compared against the corresponding
+        bounds `a_min` and `a_max`. If a value in `a` is less than `a_min`, it is set
+        to `a_min`; if greater than `a_max`, it is set to `a_max`. Otherwise, the
+        value is left unchanged. The result preserves the dtype and device of the input.
+
+        :param a: Input tensor containing values to be clipped.
+        :type a: Tensor
+        :param a_min: Lower bound (minimum value) for clipping. Can be a scalar tensor
+            or broadcastable to the shape of `a`.
+        :type a_min: Tensor
+        :param a_max: Upper bound (maximum value) for clipping. Can be a scalar tensor
+            or broadcastable to the shape of `a`.
+        :type a_max: Tensor
+        :return: A tensor with the same shape as `a`, where all values are clipped
+            to lie within the interval [a_min, a_max].
+        :rtype: Tensor
+
+        :raises NotImplementedError: If the backend does not implement `clip`.
+        """
+        raise NotImplementedError(
+            "Backend '{}' has not implemented `clip`.".format(self.name)
+        )
+
     def reverse(self: Any, a: Tensor) -> Tensor:
         """
         return ``a[::-1]``, only 1D tensor is guaranteed for consistent behavior
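For reference, here is a minimal sketch (not part of the wheel) of what the new `ExtendedBackend.reshaped` helper computes: the leg count is `log_d(size)` and sizes that are not an exact power of `d` are rejected. The standalone `reshaped_like` function below is hypothetical and only mirrors the diff's logic with plain NumPy.

```python
# Illustrative sketch only: the same power-of-d check that reshaped performs,
# written against a plain NumPy array instead of a backend tensor.
import math
import numpy as np


def reshaped_like(a: np.ndarray, d: int) -> np.ndarray:
    if not isinstance(d, int) or d <= 0:
        raise ValueError("d must be a positive integer.")
    size = a.size
    if size == 0:
        return a.reshape((0,))
    nleg = int(round(math.log(size, d)))  # candidate number of legs
    if d**nleg != size:                   # reject sizes that are not d**n
        raise ValueError(f"cannot reshape: size {size} is not a power of d={d}")
    return a.reshape((d,) * nleg)


print(reshaped_like(np.arange(8.0), 2).shape)   # (2, 2, 2)
print(reshaped_like(np.arange(27.0), 3).shape)  # (3, 3, 3)
```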
tensorcircuit/backends/jax_backend.py
CHANGED

@@ -349,6 +349,12 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend):  # type: ignore
     def mod(self, x: Tensor, y: Tensor) -> Tensor:
         return jnp.mod(x, y)
 
+    def floor(self, a: Tensor) -> Tensor:
+        return jnp.floor(a)
+
+    def clip(self, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor:
+        return jnp.clip(a, a_min, a_max)
+
     def right_shift(self, x: Tensor, y: Tensor) -> Tensor:
         return jnp.right_shift(x, y)
 
tensorcircuit/backends/numpy_backend.py
CHANGED

@@ -250,6 +250,12 @@ class NumpyBackend(numpy_backend.NumPyBackend, ExtendedBackend):  # type: ignore
     def mod(self, x: Tensor, y: Tensor) -> Tensor:
         return np.mod(x, y)
 
+    def floor(self, a: Tensor) -> Tensor:
+        return np.floor(a)
+
+    def clip(self, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor:
+        return np.clip(a, a_min, a_max)
+
     def right_shift(self, x: Tensor, y: Tensor) -> Tensor:
         return np.right_shift(x, y)
 
tensorcircuit/backends/pytorch_backend.py
CHANGED

@@ -429,6 +429,12 @@ class PyTorchBackend(pytorch_backend.PyTorchBackend, ExtendedBackend):  # type:
     def mod(self, x: Tensor, y: Tensor) -> Tensor:
         return torchlib.fmod(x, y)
 
+    def floor(self, a: Tensor) -> Tensor:
+        return torchlib.floor(a)
+
+    def clip(self, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor:
+        return torchlib.clamp(a, a_min, a_max)
+
     def right_shift(self, x: Tensor, y: Tensor) -> Tensor:
         return torchlib.bitwise_right_shift(x, y)
 
tensorcircuit/backends/tensorflow_backend.py
CHANGED

@@ -573,6 +573,14 @@ class TensorFlowBackend(tensorflow_backend.TensorFlowBackend, ExtendedBackend):
     def stack(self, a: Sequence[Tensor], axis: int = 0) -> Tensor:
         return tf.stack(a, axis=axis)
 
+    def clip(self, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor:
+        return tf.clip_by_value(a, a_min, a_max)
+
+    def floor(self, a: Tensor) -> Tensor:
+        if a.dtype.is_integer:
+            return a
+        return tf.math.floor(a)
+
     def concat(self, a: Sequence[Tensor], axis: int = 0) -> Tensor:
         return tf.concat(a, axis=axis)
 
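A short usage sketch (not part of the diff) of the newly added element-wise primitives through tensorcircuit's backend-agnostic interface; it assumes the usual `tc.set_backend` / `tc.backend` API and a build that already contains the hunks above.

```python
# Usage sketch: exercise the new floor/clip primitives added in this release.
import numpy as np
import tensorcircuit as tc

tc.set_backend("numpy")  # the diff adds the same methods for jax / pytorch / tensorflow

x = tc.backend.convert_to_tensor(np.array([-1.7, 0.3, 2.5]))
print(tc.backend.floor(x))            # [-2.  0.  2.]
print(tc.backend.clip(x, -1.0, 1.0))  # [-1.   0.3  1. ]
```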
tensorcircuit/timeevol.py
CHANGED

@@ -2,8 +2,9 @@
 Analog time evolution engines
 """
 
-from typing import Any, Tuple, Optional, Callable, List, Sequence
+from typing import Any, Tuple, Optional, Callable, List, Sequence, Dict
 from functools import partial
+import warnings
 
 import numpy as np
 
@@ -427,37 +428,58 @@ def hamiltonian_evol(
 ed_evol = hamiltonian_evol
 
 
-
-
-
-
-
-
-
-
-
-    """
-
-    [only jax backend support for now]
+def _solve_ode(
+    f: Callable[..., Tensor],
+    s: Tensor,
+    times: Tensor,
+    args: Any,
+    solver_kws: Dict[str, Any],
+) -> Tensor:
+    rtol = solver_kws.get("rtol", 1e-12)
+    atol = solver_kws.get("atol", 1e-12)
+    ode_backend = solver_kws.get("ode_backend", "jaxode")
+    max_steps = solver_kws.get("max_steps", 10000)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    ts = backend.convert_to_tensor(times)
+    ts = backend.cast(ts, dtype=rdtypestr)
+
+    if ode_backend == "jaxode":
+        from jax.experimental.ode import odeint
+
+        s1 = odeint(f, s, ts, rtol=rtol, atol=atol, mxstep=max_steps, *args)
+        return s1
+
+    import diffrax
+
+    # Ignore complex warning
+    warnings.simplefilter("ignore", category=UserWarning, append=True)
+
+    solver = solver_kws.get("solver", "Tsit5")
+    dt0 = solver_kws.get("dt0", 0.01)
+    all_solvers = {
+        "Dopri5": diffrax.Dopri5,
+        "Tsit5": diffrax.Tsit5,
+        "Dopri8": diffrax.Dopri8,
+        "Kvaerno5": diffrax.Kvaerno5,
+    }
+
+    # ODE
+    term = diffrax.ODETerm(lambda t, y, args: f(y, t, *args))
+
+    # solve ODE
+    s1 = diffrax.diffeqsolve(
+        terms=term,
+        solver=all_solvers[solver](),
+        t0=times[0],
+        t1=times[-1],
+        dt0=dt0,
+        y0=s,
+        saveat=diffrax.SaveAt(ts=times),
+        args=args,
+        stepsize_controller=diffrax.PIDController(rtol=rtol, atol=atol),
+        max_steps=max_steps,
+    ).ys
+    return s1
 
 
 def ode_evol_local(
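For orientation, a self-contained sketch (not from the package) of the diffrax call pattern `_solve_ode` uses on its non-`jaxode` path, integrating dy/dt = -y on a fixed time grid; the tolerances and step limit here are illustrative (the hunk above defaults to 1e-12 and 10000).

```python
# Standalone sketch of the diffrax call pattern mirrored from _solve_ode above.
import jax.numpy as jnp
import diffrax


def f(y, t):  # right-hand side in the odeint-style (y, t) convention
    return -y


ts = jnp.linspace(0.0, 1.0, 5)
term = diffrax.ODETerm(lambda t, y, args: f(y, t))  # adapt to diffrax's (t, y, args)
sol = diffrax.diffeqsolve(
    terms=term,
    solver=diffrax.Tsit5(),
    t0=ts[0],
    t1=ts[-1],
    dt0=0.01,
    y0=jnp.array(1.0),
    saveat=diffrax.SaveAt(ts=ts),
    stepsize_controller=diffrax.PIDController(rtol=1e-8, atol=1e-8),
    max_steps=10000,
)
print(sol.ys)  # approximately exp(-ts)
```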
@@ -475,6 +497,9 @@ def ode_evol_local(
     This function solves the time-dependent Schrodinger equation using numerical ODE integration.
     The Hamiltonian is applied only to a specific subset of qubits (indices) in the system.
 
+    The ode_backend parameter defaults to 'jaxode' (which uses `jax.experimental.ode.odeint` with a default solver
+    of 'Dopri5');if set to 'diffrax', it uses `diffrax.diffeqsolve` instead (with a default solver of 'Tsit5').
+
     Note: This function currently only supports the JAX backend.
 
     :param hamiltonian: A function that returns a dense Hamiltonian matrix for the specified
@@ -490,13 +515,20 @@
     :type callback: Optional[Callable[..., Tensor]]
     :param args: Additional arguments to pass to the Hamiltonian function.
     :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+        ode_backend='jaxode'(default) uses `jax.experimental.ode.odeint`; ode_backend='diffrax'
+        uses `diffrax.diffeqsolve`.
+        rtol (default: 1e-12) and atol (default: 1e-12) are used to determine how accurately you would
+        like the numerical approximation to your equation.
+        The solver parameter accepts one of {'Tsit5' (default), 'Dopri5', 'Dopri8', 'Kvaerno5'}
+        and only works when ode_backend='diffrax'.
+        dt0 (default: 0.01) specifies the initial step size and only works when ode_backend='diffrax'.
+        max_steps (default: 10000) The maximum number of steps to take before quitting the computation
+        unconditionally and only works when ode_backend='diffrax'.
     :return: Evolved quantum states at the specified time points. If callback is provided,
         returns the callback results; otherwise returns the state vectors.
     :rtype: Tensor
     """
-    from jax.experimental.ode import odeint
 
-    s = initial_state
     n = int(np.log2(backend.shape_tuple(initial_state)[-1]) + 1e-7)
     l = len(index)
 
@@ -517,38 +549,11 @@
         y = contractor([y, h], output_edge_order=edges)
         return backend.reshape(y.tensor, [-1])
 
-
-    ts = backend.cast(ts, dtype=rdtypestr)
-    s1 = odeint(f, s, ts, *args, **solver_kws)
-    if not callback:
-        return s1
-    return backend.stack([callback(s1[i]) for i in range(len(s1))])
+    s1 = _solve_ode(f, initial_state, times, args, solver_kws)
 
-
-
-
-    c: Circuit, h_fun: Callable[..., Tensor], t: float, *args: Any, **solver_kws: Any
-) -> Circuit:
-    """
-    ode evolution of time dependent Hamiltonian on circuit of all qubits
-    [only jax backend support for now]
-
-    :param c: _description_
-    :type c: Circuit
-    :param h_fun: h_fun should return a **SPARSE** Hamiltonian matrix
-        with input arguments time and *args
-    :type h_fun: Callable[..., Tensor]
-    :param t: _description_
-    :type t: float
-    :return: _description_
-    :rtype: Circuit
-    """
-    s = c.state()
-    n = c._nqubits
-    if isinstance(t, float):
-        t = backend.stack([0.0, t])
-    s1 = ode_evol_global(h_fun, s, t, None, *args, **solver_kws)
-    return type(c)(n, inputs=s1[-1])
+    if callback is None:
+        return s1
+    return backend.stack([callback(a_state) for a_state in s1])
 
 
 def ode_evol_global(
@@ -564,7 +569,10 @@
 
     This function solves the time-dependent Schrodinger equation using numerical ODE integration.
     The Hamiltonian is applied to the full system and should be provided in sparse matrix format
-
+    for efficiency.
+
+    The ode_backend parameter defaults to 'jaxode' (which uses `jax.experimental.ode.odeint` with a default solver
+    of 'Dopri5');if set to 'diffrax', it uses `diffrax.diffeqsolve` instead (with a default solver of 'Tsit5').
 
     Note: This function currently only supports the JAX backend.
 
@@ -578,25 +586,91 @@
     :param callback: Optional function to apply to the state at each time step.
     :type callback: Optional[Callable[..., Tensor]]
     :param args: Additional arguments to pass to the Hamiltonian function.
+    :type args: tuple | list
     :param solver_kws: Additional keyword arguments to pass to the ODE solver.
+        ode_backend='jaxode'(default) uses `jax.experimental.ode.odeint`; ode_backend='diffrax'
+        uses `diffrax.diffeqsolve`.
+        rtol (default: 1e-12) and atol (default: 1e-12) are used to determine how accurately you would
+        like the numerical approximation to your equation.
+        The solver parameter accepts one of {'Tsit5' (default), 'Dopri5', 'Dopri8', 'Kvaerno5'}
+        and only works when ode_backend='diffrax'.
+        dt0 (default: 0.01) specifies the initial step size and only works when ode_backend='diffrax'.
+        max_steps (default: 10000) The maximum number of steps to take before quitting the computation
+        unconditionally and only works when ode_backend='diffrax'.
+    :type solver_kws: dict
     :return: Evolved quantum states at the specified time points. If callback is provided,
         returns the callback results; otherwise returns the state vectors.
     :rtype: Tensor
     """
-    from jax.experimental.ode import odeint
-
-    s = initial_state
-    ts = backend.convert_to_tensor(times)
-    ts = backend.cast(ts, dtype=rdtypestr)
 
     def f(y: Tensor, t: Tensor, *args: Any) -> Tensor:
         h = -1.0j * hamiltonian(t, *args)
         return backend.sparse_dense_matmul(h, y)
 
-    s1 =
-
+    s1 = _solve_ode(f, initial_state, times, args, solver_kws)
+
+    if callback is None:
         return s1
-    return backend.stack([callback(
+    return backend.stack([callback(a_state) for a_state in s1])
+
+
+@partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
+def evol_local(
+    c: Circuit,
+    index: Sequence[int],
+    h_fun: Callable[..., Tensor],
+    t: float,
+    *args: Any,
+    **solver_kws: Any,
+) -> Circuit:
+    """
+    ode evolution of time dependent Hamiltonian on circuit of given indices
+    [only jax backend support for now]
+
+    :param c: _description_
+    :type c: Circuit
+    :param index: qubit sites to evolve
+    :type index: Sequence[int]
+    :param h_fun: h_fun should return a dense Hamiltonian matrix
+        with input arguments time and *args
+    :type h_fun: Callable[..., Tensor]
+    :param t: evolution time
+    :type t: float
+    :return: _description_
+    :rtype: Circuit
+    """
+    s = c.state()
+    n = int(np.log2(s.shape[-1]) + 1e-7)
+    if isinstance(t, float):
+        t = backend.stack([0.0, t])
+    s1 = ode_evol_local(h_fun, s, t, index, None, *args, **solver_kws)
+    return type(c)(n, inputs=s1[-1])
+
+
+@partial(arg_alias, alias_dict={"h_fun": ["hamiltonian"], "t": ["times"]})
+def evol_global(
+    c: Circuit, h_fun: Callable[..., Tensor], t: float, *args: Any, **solver_kws: Any
+) -> Circuit:
+    """
+    ode evolution of time dependent Hamiltonian on circuit of all qubits
+    [only jax backend support for now]
+
+    :param c: _description_
+    :type c: Circuit
+    :param h_fun: h_fun should return a **SPARSE** Hamiltonian matrix
+        with input arguments time and *args
+    :type h_fun: Callable[..., Tensor]
+    :param t: _description_
+    :type t: float
+    :return: _description_
+    :rtype: Circuit
+    """
+    s = c.state()
+    n = c._nqubits
+    if isinstance(t, float):
+        t = backend.stack([0.0, t])
+    s1 = ode_evol_global(h_fun, s, t, None, *args, **solver_kws)
+    return type(c)(n, inputs=s1[-1])
 
 
 def chebyshev_evol(
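A usage sketch (not from the package) of the reworked evolution API: the argument order is inferred from the internal calls visible in the hunks above (`ode_evol_local(h_fun, s, t, index, None, *args, **solver_kws)`), and the two-qubit Hamiltonian is purely illustrative.

```python
# Usage sketch: drive the ODE evolution with the new solver_kws; argument order
# follows the internal calls shown in the diff and may differ in other builds.
import jax.numpy as jnp
import tensorcircuit as tc
from tensorcircuit.timeevol import ode_evol_local

tc.set_backend("jax")


def h_fun(t):
    # illustrative time-dependent dense two-qubit Hamiltonian: ZZ + cos(t) * X0
    z = jnp.array([[1.0, 0.0], [0.0, -1.0]])
    x = jnp.array([[0.0, 1.0], [1.0, 0.0]])
    return jnp.kron(z, z) + jnp.cos(t) * jnp.kron(x, jnp.eye(2))


c = tc.Circuit(2)
c.h(0)
times = jnp.linspace(0.0, 1.0, 11)

# dense local evolution on qubits [0, 1]; solver_kws are forwarded to _solve_ode
states = ode_evol_local(
    h_fun, c.state(), times, [0, 1], None, ode_backend="diffrax", solver="Dopri5"
)
print(states.shape)  # one evolved state per requested time
```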
{tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tensorcircuit-nightly
-Version: 1.3.0.
+Version: 1.3.0.dev20250829
 Summary: High performance unified quantum computing framework for the NISQ era
 Author-email: TensorCircuit Authors <znfesnpbh@gmail.com>
 License-Expression: Apache-2.0
@@ -341,8 +341,10 @@ TensorCircuit-NG is open source, released under the Apache License, Version 2.0.
       <td align="center" valign="top" width="16.66%"><a href="https://adeshpande.gitlab.io"><img src="https://avatars.githubusercontent.com/u/6169877?v=4?s=100" width="100px;" alt="Abhinav Deshpande"/><br /><sub><b>Abhinav Deshpande</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=abhinavd" title="Code">💻</a></td>
     </tr>
     <tr>
-      <td align="center" valign="top" width="16.66%"><a href="https://github.com/Stellogic"><img src="https://avatars.githubusercontent.com/u/186928579?v=4?s=100" width="100px;" alt="Stellogic"/><br /><sub><b>Stellogic</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Stellogic" title="Code">💻</a> <a href="#example-Stellogic" title="Examples">💡</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Stellogic" title="Tests">⚠️</a></td>
+      <td align="center" valign="top" width="16.66%"><a href="https://github.com/Stellogic"><img src="https://avatars.githubusercontent.com/u/186928579?v=4?s=100" width="100px;" alt="Stellogic"/><br /><sub><b>Stellogic</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Stellogic" title="Code">💻</a> <a href="#example-Stellogic" title="Examples">💡</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Stellogic" title="Tests">⚠️</a> <a href="#tutorial-Stellogic" title="Tutorials">✅</a></td>
       <td align="center" valign="top" width="16.66%"><a href="https://github.com/Charlespkuer"><img src="https://avatars.githubusercontent.com/u/112697147?v=4?s=100" width="100px;" alt="Huang"/><br /><sub><b>Huang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Code">💻</a> <a href="#example-Charlespkuer" title="Examples">💡</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Tests">⚠️</a></td>
+      <td align="center" valign="top" width="16.66%"><a href="https://github.com/Huang-Xu-Yang"><img src="https://avatars.githubusercontent.com/u/227286661?v=4?s=100" width="100px;" alt="Huang-Xu-Yang"/><br /><sub><b>Huang-Xu-Yang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Tests">⚠️</a></td>
+      <td align="center" valign="top" width="16.66%"><a href="https://github.com/WeiguoMa"><img src="https://avatars.githubusercontent.com/u/108172530?v=4?s=100" width="100px;" alt="Weiguo_M"/><br /><sub><b>Weiguo_M</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Tests">⚠️</a></td>
     </tr>
   </tbody>
 </table>
@@ -423,6 +425,18 @@ For the simulation code and data for variational optimization of simutaneous exc
 
 Reference paper: https://arxiv.org/abs/2504.21459.
 
+### Quantum Machine Unlearning
+
+For the simulation code for the work "superior resilience to poisoning and amenability to unlearning in quantum machine learning", see the [project repo](https://github.com/yutuer21/quantum-machine-unlearning).
+
+Reference paper: https://arxiv.org/abs/2508.02422.
+
+### Low Weight Pauli Propagation Simulation
+
+For the simulation code and data for the work on low weight Pauli propagation in the context of variational quantum algorithms, see the [project repo](https://github.com/ZongliangLi/lwpp_init).
+
+Reference paper: https://arxiv.org/abs/2508.06358.
+
 ### More works
 
 <details>
@@ -468,20 +482,28 @@ Reference paper: https://arxiv.org/abs/2504.21459.
 
 - Variational post-selection for ground states and thermal states simulation: https://arxiv.org/abs/2402.07605 (published in QST).
 
-- Subsystem information capacity in random circuits and Hamiltonian dynamics: https://arxiv.org/abs/2405.05076. Code implementation: https://github.com/sxzgroup/subsystem_information_capacity.
+- Subsystem information capacity in random circuits and Hamiltonian dynamics: https://arxiv.org/abs/2405.05076 (published in Quantum). Code implementation: https://github.com/sxzgroup/subsystem_information_capacity.
 
 - Symmetry restoration and quantum Mpemba effect in symmetric random circuits: https://arxiv.org/abs/2403.08459 (published in PRL).
 
 - Quantum Mpemba effects in many-body localization systems: https://arxiv.org/abs/2408.07750.
 
-- Supersymmetry dynamics on Rydberg atom arrays: https://arxiv.org/abs/2410.21386.
+- Supersymmetry dynamics on Rydberg atom arrays: https://arxiv.org/abs/2410.21386 (published in PRB).
 
 - Dynamic parameterized quantum circuits: expressive and barren-plateau free: https://arxiv.org/abs/2411.05760.
 
-- Holographic deep thermalization: https://arxiv.org/abs/2411.03587.
+- Holographic deep thermalization: https://arxiv.org/abs/2411.03587 (published in Nature Communications).
 
 - Quantum deep generative prior with programmable quantum circuits: https://www.nature.com/articles/s42005-024-01765-9 (published in Communications Physics).
 
+- Symmetry Breaking Dynamics in Quantum Many-Body Systems: https://arxiv.org/abs/2501.13459.
+
+- Entanglement growth and information capacity in a quasiperiodic system with a single-particle mobility edge: https://arxiv.org/abs/2506.18076.
+
+- Hilbert subspace imprint: a new mechanism for non-thermalization: https://arxiv.org/abs/2506.11922.
+
+- A Neural-Guided Variational Quantum Algorithm for Efficient Sign Structure Learning in Hybrid Architectures: https://arxiv.org/abs/2507.07555.
+
 </details>
 
 If you want to highlight your research work or projects here, feel free to add by opening PR.
{tensorcircuit_nightly-1.3.0.dev20250827.dist-info → tensorcircuit_nightly-1.3.0.dev20250829.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-tensorcircuit/__init__.py,sha256=
+tensorcircuit/__init__.py,sha256=LEOfft6DW_tzSmZTmjA_Pzd0XljJyi8pTRi1pT7gR_4,2055
 tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
 tensorcircuit/abstractcircuit.py,sha256=0osacPqq7B1EJki-cI1aLYoVRmjFaG9q3XevWMs7SsA,44125
 tensorcircuit/asciiart.py,sha256=neY1OWFwtoW5cHPNwkQHgRPktDniQvdlP9QKHkk52fM,8236
@@ -18,7 +18,7 @@ tensorcircuit/quantum.py,sha256=68SZ1lXJZSKVr5xe3_uvU-_25GOFnSPGlw4Ziaj1nBI,1046
 tensorcircuit/shadows.py,sha256=6XmWNubbuaxFNvZVWu-RXd0lN9Jkk-xwong_K8o8_KE,17014
 tensorcircuit/simplify.py,sha256=O11G3UYiVAc30GOfwXXmhLXwGZrQ8OVwLTMQMZp_XBc,9414
 tensorcircuit/stabilizercircuit.py,sha256=KbrBVSo2pXnf5JHIrxwRPSPTm7bJVMIcyE4d7-dIfCM,15545
-tensorcircuit/timeevol.py,sha256=
+tensorcircuit/timeevol.py,sha256=dmFXDqBFvQI54QK9ViJ9XEVVha9QIE8Bl0sZ1mN85PI,31695
 tensorcircuit/torchnn.py,sha256=z_QpM0QC3mydGyWpyp877j-tSFCPyzynCwqrTWaw-IA,4637
 tensorcircuit/translation.py,sha256=VnU7DnYmbk1cWjqa7N68WNLNDn3DwENrMzmbG4_CQco,28611
 tensorcircuit/utils.py,sha256=nEDR1wTh1WF_yV6UyZYlifqOPWdKk_Krr4HjhrWHnGQ,7228
@@ -40,15 +40,15 @@ tensorcircuit/applications/physics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQe
 tensorcircuit/applications/physics/baseline.py,sha256=RWrzMGnC0PtmpYSFkvCE7r1llR88gncXuCakAAhFE-w,1775
 tensorcircuit/applications/physics/fss.py,sha256=ny3U9ZDmT459PXjA1oUGfarBOlSKSy6fs04vD9s1XH4,3633
 tensorcircuit/backends/__init__.py,sha256=WiUmbUFzM29w3hKfhuKxVUk3PpqDFiXf4za9g0ctpZA,80
-tensorcircuit/backends/abstract_backend.py,sha256=
+tensorcircuit/backends/abstract_backend.py,sha256=PFfiWLkZ6izj34rRTTNeSp0VesSwFPXM_SIDqQ_viLA,69210
 tensorcircuit/backends/backend_factory.py,sha256=Z0aQ-RnxOnQzp-SRw8sefAH8XyBSlj2NXZwOlHinbfY,1713
 tensorcircuit/backends/cupy_backend.py,sha256=KG5fqP29wnngkPsi-TnOk0pHsr9lyD7hx6_Y56fCQuY,15172
-tensorcircuit/backends/jax_backend.py,sha256=
+tensorcircuit/backends/jax_backend.py,sha256=PjvPbSItMwYpW83sKOmZb2QKz6LjClFIMbAzQWCK124,28013
 tensorcircuit/backends/jax_ops.py,sha256=WyUGavch2R9uEFsI1Ap7eP1UcU4s2TItBgGsrVS3Hzs,9320
-tensorcircuit/backends/numpy_backend.py,sha256=
-tensorcircuit/backends/pytorch_backend.py,sha256=
+tensorcircuit/backends/numpy_backend.py,sha256=yNBj45W9-VMo-61ihzAAFS5jaj-bHJz0OJB9gZjwYUA,15515
+tensorcircuit/backends/pytorch_backend.py,sha256=6nTIG3NO6N2ChzfSOq3Wmti3n5hxkcMwvp7NQ7XEG2w,25582
 tensorcircuit/backends/pytorch_ops.py,sha256=lLxpK6OqfpVwifyFlgsqhpnt-oIn4R5paPMVg51WaW0,3826
-tensorcircuit/backends/tensorflow_backend.py,sha256=
+tensorcircuit/backends/tensorflow_backend.py,sha256=VgigYIGkwluHv1I2dS01cU11VPrZnMaAp2R1sZKOFnk,39714
 tensorcircuit/backends/tf_ops.py,sha256=FJwDU7LhZrt0VUIx12DJU0gZnWhMv7B7r9sAKG710As,3378
 tensorcircuit/cloud/__init__.py,sha256=n0Lx07GYF6YbdIa6AJCLJk4zlAm5CqaeHszvkxxuoI4,139
 tensorcircuit/cloud/abstraction.py,sha256=6aSxbz0MP21jBVdFbSMrvJPLQH117vGz9sSHbMFoodE,14582
@@ -86,8 +86,8 @@ tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA
 tensorcircuit/templates/hamiltonians.py,sha256=dp1E5kZDxpE4g7df1EujQHP4sBU6kuGTkF4e49X2IPk,6116
 tensorcircuit/templates/lattice.py,sha256=QH6N4sOegQPZGN-5UjTecy220-8AJLFvo1seh7VD3xA,72851
 tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
-tensorcircuit_nightly-1.3.0.
+tensorcircuit_nightly-1.3.0.dev20250829.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+tensorcircuit_nightly-1.3.0.dev20250829.dist-info/METADATA,sha256=OwvpfaiSv7EJSzYYNcrAg8J46wTlioOwXTS6SQ_Ofyw,37856
+tensorcircuit_nightly-1.3.0.dev20250829.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tensorcircuit_nightly-1.3.0.dev20250829.dist-info/top_level.txt,sha256=9dcuK5488dWpVauYz8cdvx743z_La1h7zIQCsEEgu7o,14
+tensorcircuit_nightly-1.3.0.dev20250829.dist-info/RECORD,,