emu-sv 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_sv/__init__.py +42 -0
- emu_sv/custom_callback_implementations.py +93 -0
- emu_sv/dense_operator.py +199 -0
- emu_sv/hamiltonian.py +157 -0
- emu_sv/state_vector.py +271 -0
- emu_sv/sv_backend.py +123 -0
- emu_sv/sv_config.py +90 -0
- emu_sv/time_evolution.py +32 -0
- emu_sv-1.0.0.dist-info/METADATA +37 -0
- emu_sv-1.0.0.dist-info/RECORD +11 -0
- emu_sv-1.0.0.dist-info/WHEEL +4 -0
emu_sv/__init__.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from emu_sv.state_vector import StateVector, inner
|
|
2
|
+
from emu_sv.dense_operator import DenseOperator
|
|
3
|
+
from emu_sv.sv_backend import SVBackend, SVConfig
|
|
4
|
+
from emu_base.base_classes import Results
|
|
5
|
+
from emu_base.base_classes.callback import AggregationType
|
|
6
|
+
|
|
7
|
+
from emu_base.base_classes import (
|
|
8
|
+
Callback,
|
|
9
|
+
BitStrings,
|
|
10
|
+
CorrelationMatrix,
|
|
11
|
+
Energy,
|
|
12
|
+
EnergyVariance,
|
|
13
|
+
Expectation,
|
|
14
|
+
QubitDensity,
|
|
15
|
+
StateResult,
|
|
16
|
+
SecondMomentOfEnergy,
|
|
17
|
+
Fidelity,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# Explicit public API of the emu-sv package: names defined here plus the
# callback and result classes re-exported from emu_base above.
__all__ = [
    "__version__",
    "StateVector",
    "DenseOperator",
    "inner",
    "SVBackend",
    "SVConfig",
    "Callback",
    "BitStrings",
    "CorrelationMatrix",
    "Energy",
    "EnergyVariance",
    "Expectation",
    "Fidelity",
    "QubitDensity",
    "StateResult",
    "SecondMomentOfEnergy",
    "AggregationType",
    "Results",
]


# Package version; kept in sync with the distribution metadata (emu_sv-1.0.0).
__version__ = "1.0.0"
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import math
|
|
2
|
+
import torch
|
|
3
|
+
|
|
4
|
+
from emu_base.base_classes.config import BackendConfig
|
|
5
|
+
from emu_base.base_classes.default_callbacks import (
|
|
6
|
+
QubitDensity,
|
|
7
|
+
EnergyVariance,
|
|
8
|
+
SecondMomentOfEnergy,
|
|
9
|
+
CorrelationMatrix,
|
|
10
|
+
)
|
|
11
|
+
from emu_base.base_classes.operator import Operator
|
|
12
|
+
|
|
13
|
+
from emu_sv import StateVector
|
|
14
|
+
from emu_sv.hamiltonian import RydbergHamiltonian
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def qubit_density_sv_impl(
    self: QubitDensity, config: BackendConfig, t: int, state: StateVector, H: Operator
) -> torch.Tensor:
    """
    Custom implementation of the qubit density ❬ψ|nᵢ|ψ❭ for the state vector solver.
    """
    num_qubits = int(math.log2(len(state.vector)))
    # View the flat vector as a (2, 2, ..., 2) tensor so that fixing one index
    # selects the |1❭ component of the corresponding qubit.
    psi = state.vector.reshape((2,) * num_qubits)

    densities = torch.zeros(num_qubits, dtype=torch.float64, device=psi.device)
    for qubit in range(num_qubits):
        # ❬ψ|n_q|ψ❭ is the squared norm of the slice where qubit q is in |1❭.
        densities[qubit] = psi.select(qubit, 1).norm() ** 2
    return densities
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def correlation_matrix_sv_impl(
    self: CorrelationMatrix,
    config: BackendConfig,
    t: int,
    state: StateVector,
    H: Operator,
) -> torch.Tensor:
    """
    Custom implementation of the density-density correlation ❬ψ|nᵢnⱼ|ψ❭ for the state vector solver.

    TODO: extend to arbitrary two-point correlation ❬ψ|AᵢBⱼ|ψ❭
    """
    num_qubits = int(math.log2(len(state.vector)))
    psi = state.vector.reshape((2,) * num_qubits)

    corr = torch.zeros(
        num_qubits, num_qubits, dtype=torch.float64, device=psi.device
    )

    for i in range(num_qubits):
        # Slice where qubit i is in |1❭. One dimension has been consumed, so a
        # qubit j > i now lives on axis j - 1 of this view.
        psi_i = psi.select(i, 1)
        for j in range(i, num_qubits):  # fill the upper triangle, mirror below
            if j == i:
                # Diagonal is just ❬nᵢ❭, since nᵢ² = nᵢ for projectors.
                element = psi_i.norm() ** 2
            else:
                element = psi_i.select(j - 1, 1).norm() ** 2

            corr[i, j] = element
            corr[j, i] = element
    return corr
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def energy_variance_sv_impl(
    self: EnergyVariance,
    config: BackendConfig,
    t: int,
    state: StateVector,
    H: RydbergHamiltonian,
) -> torch.Tensor:
    """
    Custom implementation of the energy variance ❬ψ|H²|ψ❭-❬ψ|H|ψ❭² for the state vector solver.
    """
    # A single application of H yields both moments:
    # ❬Hψ|Hψ❭ = ❬ψ|H²|ψ❭ and ❬ψ|Hψ❭ = ❬ψ|H|ψ❭ (real parts, H Hermitian).
    h_psi = H * state.vector
    second_moment = torch.vdot(h_psi, h_psi).real
    first_moment = torch.vdot(state.vector, h_psi).real
    variance: torch.Tensor = second_moment - first_moment**2
    return variance
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def second_moment_sv_impl(
    self: SecondMomentOfEnergy,
    config: BackendConfig,
    t: int,
    state: StateVector,
    H: RydbergHamiltonian,
) -> torch.Tensor:
    """
    Custom implementation of the second moment of energy ❬ψ|H²|ψ❭
    for the state vector solver.
    """
    # ❬ψ|H²|ψ❭ = ❬Hψ|Hψ❭, so one application of H suffices (H Hermitian).
    h_psi = H * state.vector
    return torch.vdot(h_psi, h_psi).real
|
emu_sv/dense_operator.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
import itertools
|
|
3
|
+
from typing import Any, Iterable
|
|
4
|
+
|
|
5
|
+
import torch
|
|
6
|
+
from emu_base.base_classes.operator import FullOp, QuditOp
|
|
7
|
+
from emu_base import Operator, State, DEVICE_COUNT
|
|
8
|
+
from emu_sv.state_vector import StateVector
|
|
9
|
+
|
|
10
|
+
dtype = torch.complex128
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _validate_operator_targets(operations: FullOp, nqubits: int) -> None:
|
|
14
|
+
"""Check for `operator_for_string` method"""
|
|
15
|
+
for tensorop in operations:
|
|
16
|
+
target_qids = (factor[1] for factor in tensorop[1])
|
|
17
|
+
target_qids_list = list(itertools.chain(*target_qids))
|
|
18
|
+
target_qids_set = set(target_qids_list)
|
|
19
|
+
if len(target_qids_set) < len(target_qids_list):
|
|
20
|
+
# Either the qubit id has been defined twice in an operation:
|
|
21
|
+
for qids in target_qids:
|
|
22
|
+
if len(set(qids)) < len(qids):
|
|
23
|
+
raise ValueError("Duplicate atom ids in argument list.")
|
|
24
|
+
# Or it was defined in two different operations
|
|
25
|
+
raise ValueError("Each qubit can be targeted by only one operation.")
|
|
26
|
+
if max(target_qids_set) >= nqubits:
|
|
27
|
+
raise ValueError(
|
|
28
|
+
"The operation targets more qubits than there are in the register."
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class DenseOperator(Operator):
    """Operators in EMU-SV are dense matrices."""

    def __init__(
        self,
        matrix: torch.Tensor,
        *,
        gpu: bool = True,
    ):
        """
        Wrap a dense matrix, casting it to complex128 and moving it to the
        GPU when requested and one is available.

        Args:
            matrix: dense matrix representation of the operator
            gpu: store the matrix on GPU if True (and a GPU exists), else CPU
        """
        device = "cuda" if gpu and DEVICE_COUNT > 0 else "cpu"
        self.matrix = matrix.to(dtype=dtype, device=device)

    def __repr__(self) -> str:
        return repr(self.matrix)

    def __matmul__(self, other: Operator) -> DenseOperator:
        """
        Compose this operator with other. The ordering is that
        self is applied after other.

        Args:
            other: the operator to compose with self

        Returns:
            the composed operator
        """
        # fix: message now names the actually required type
        assert isinstance(
            other, DenseOperator
        ), "DenseOperator can only be composed with another DenseOperator"

        return DenseOperator(self.matrix @ other.matrix)

    def __add__(self, other: Operator) -> DenseOperator:
        """
        Returns the sum of two dense operators.

        Args:
            other: the other operator

        Returns:
            the summed operator
        """
        # fix: previous message referred to MPOs, a leftover from the
        # tensor-network backend this file was adapted from
        assert isinstance(
            other, DenseOperator
        ), "DenseOperator can only be added to another DenseOperator"

        return DenseOperator(self.matrix + other.matrix)

    def __rmul__(self, scalar: complex) -> DenseOperator:
        """
        Multiply a DenseOperator by scalar.

        Args:
            scalar: the scale factor to multiply with

        Returns:
            the scaled operator
        """

        return DenseOperator(self.matrix * scalar)

    def __mul__(self, other: State) -> StateVector:
        """
        Applies this DenseOperator to the given StateVector.

        Args:
            other: the state to apply this operator to

        Returns:
            the resulting state
        """
        # fix: previous message wrongly said "another DenseOperator"
        assert isinstance(
            other, StateVector
        ), "DenseOperator can only be applied to a StateVector"

        return StateVector(self.matrix @ other.vector)

    def expect(self, state: State) -> float | complex:
        """
        Compute the expectation value of self on the given state.

        Args:
            state: the state with which to compute

        Returns:
            the expectation
        """
        assert isinstance(
            state, StateVector
        ), "currently, only expectation values of StateVectors are \
            supported"

        return torch.vdot(state.vector, self.matrix @ state.vector).item()

    @staticmethod
    def from_operator_string(
        basis: Iterable[str],
        nqubits: int,
        operations: FullOp,
        operators: dict[str, QuditOp] = {},
        /,
        **kwargs: Any,
    ) -> DenseOperator:
        """
        See the base class.

        Args:
            basis: the eigenstates in the basis to use e.g. ('r', 'g')
            nqubits: how many qubits there are in the state
            operations: which bitstrings make up the state with what weight
            operators: additional symbols to be used in operations
            kwargs: forwarded to the DenseOperator constructor (e.g. gpu)

        Returns:
            the operator as a DenseOperator.
        """

        _validate_operator_targets(operations, nqubits)

        # The mutable default {} is safe here: it is copied immediately and
        # the default object itself is never mutated.
        operators_with_tensors: dict[str, torch.Tensor | QuditOp] = dict(operators)

        basis = set(basis)
        if basis == {"r", "g"}:
            # operators_with_tensors will now contain the basis for single qubit ops,
            # and potentially user defined strings in terms of these
            operators_with_tensors |= {
                "gg": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128),
                "gr": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128),
                "rg": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128),
                "rr": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128),
            }
        elif basis == {"0", "1"}:
            # operators_with_tensors will now contain the basis for single qubit ops,
            # and potentially user defined strings in terms of these
            operators_with_tensors |= {
                "00": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128),
                "01": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128),
                "10": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128),
                "11": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128),
            }
        else:
            raise ValueError("Unsupported basis provided")

        accum_res = torch.zeros(2**nqubits, 2**nqubits, dtype=torch.complex128)
        for coeff, tensorop in operations:
            # this function will recurse through the operators_with_tensors,
            # and replace any definitions in terms of strings by the computed matrix
            def replace_operator_string(op: QuditOp | torch.Tensor) -> torch.Tensor:
                if isinstance(op, torch.Tensor):
                    return op

                result = torch.zeros(2, 2, dtype=torch.complex128)
                for opstr, factor_coeff in op.items():
                    tensor = replace_operator_string(operators_with_tensors[opstr])
                    # memoize so each string is expanded at most once
                    operators_with_tensors[opstr] = tensor
                    result += tensor * factor_coeff
                return result

            total_op_per_qubit = [torch.eye(2, 2, dtype=torch.complex128)] * nqubits

            for op in tensorop:
                factor = replace_operator_string(op[0])
                for target_qubit in op[1]:
                    total_op_per_qubit[target_qubit] = factor

            # Kronecker-multiply the per-qubit factors into a 2**n x 2**n matrix
            dense_op = total_op_per_qubit[0]
            for single_qubit_operator in total_op_per_qubit[1:]:
                dense_op = torch.kron(dense_op, single_qubit_operator)

            accum_res += coeff * dense_op
        # fix: honour a gpu=... kwarg, which was previously accepted but ignored
        return DenseOperator(accum_res, gpu=kwargs.get("gpu", True))
|
emu_sv/hamiltonian.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from emu_sv.state_vector import StateVector
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class RydbergHamiltonian:
    """
    Representation of the Rydberg Hamiltonian with light-matter interaction:

    H = ∑ⱼΩⱼ/2[cos(ϕⱼ)σˣⱼ + sin(ϕⱼ)σʸⱼ] - ∑ⱼΔⱼnⱼ + ∑ᵢ﹥ⱼUᵢⱼnᵢnⱼ

    The Hamiltonian is parameterized by driving strengths or amplitudes Ωⱼ (`omegas`), detuning
    values Δⱼ (`deltas`), phases ϕⱼ (`phis`) and interaction terms Uᵢⱼ (`interaction_matrix`).
    Implements an efficient H*|ψ❭ as custom sparse matrix-vector multiplication.

    Attributes:
        omegas (torch.Tensor): driving strength Ωⱼ for each qubit, scaled by a factor 1/2.
        deltas (torch.Tensor): detuning values Δⱼ for each qubit.
        phis (torch.Tensor): phase values ϕⱼ for each qubit.
        interaction_matrix (torch.Tensor): matrix Uᵢⱼ representing pairwise Rydberg
            interaction strengths between qubits.
        nqubits (int): number of qubits in the system.
        diag (torch.Tensor): diagonal elements of the Hamiltonian,
            calculated based on `deltas` and `interaction_matrix`.
        inds (torch.Tensor): index tensor used for vector manipulations
            in matrix-vector multiplications.

    Methods:
        __mul__(vec): implements matrix-vector multiplication with a state vector.
        _create_diagonal(): constructs the diagonal elements of the Hamiltonian
            based on `deltas` and `interaction_matrix`.
        _apply_sigma_operators_complex(): apply all driving sigma operators,
            with driving strength `omegas` and phases `phis`.
        _apply_sigma_operators_real(): only applies ∑ⱼ(Ωⱼ/2)σˣⱼ when all phases are zero (ϕⱼ=0).
    """

    def __init__(
        self,
        omegas: torch.Tensor,
        deltas: torch.Tensor,
        phis: torch.Tensor,
        interaction_matrix: torch.Tensor,
        device: torch.device,
    ):
        self.nqubits: int = len(omegas)
        # Ω is stored pre-divided by 2: every driving term uses Ωⱼ/2.
        self.omegas: torch.Tensor = omegas / 2.0
        self.deltas: torch.Tensor = deltas
        self.phis: torch.Tensor = phis
        self.interaction_matrix: torch.Tensor = interaction_matrix
        self.device: torch.device = device

        self.diag: torch.Tensor = self._create_diagonal()
        self.inds = torch.tensor([1, 0], device=self.device)  # flips the state, for σˣ

        # With all phases zero the drive reduces to (Ω/2)σˣ, which needs no
        # complex phase factors; pick the cheaper implementation in that case.
        self._apply_sigma_operators = self._apply_sigma_operators_real
        if self.phis.any():
            self._apply_sigma_operators = self._apply_sigma_operators_complex

    def __mul__(self, vec: torch.Tensor) -> torch.Tensor:
        """
        Apply the `RydbergHamiltonian` to the input state vector, i.e. H*|ψ❭.

        - The diagonal part of the Hamiltonian (Δⱼ and Uᵢⱼ terms) is stored and
          applied directly as H.diag*|ψ❭.
        - The off-diagonal part (Ωⱼ and ϕⱼ terms) are applied sequentially across
          qubit indices in `self._apply_sigma_operators`.

        Args:
            vec (torch.Tensor): the input state vector.

        Returns:
            the resulting state vector.
        """
        # (-∑ⱼΔⱼnⱼ + ∑ᵢ﹥ⱼUᵢⱼnᵢnⱼ)|ψ❭
        diag_result = self.diag * vec
        # ∑ⱼΩⱼ/2[cos(ϕⱼ)σˣⱼ + sin(ϕⱼ)σʸⱼ]|ψ❭
        sigma_result = self._apply_sigma_operators(vec)
        result: torch.Tensor
        result = diag_result + sigma_result

        return result

    def _apply_sigma_operators_real(self, vec: torch.Tensor) -> torch.Tensor:
        """
        Apply the ∑ⱼ(Ωⱼ/2)σˣⱼ operator to the input vector |ψ❭.

        Args:
            vec (torch.Tensor): the input state vector.

        Returns:
            the resulting state vector.
        """
        result = torch.zeros_like(vec)

        # In the (2**n, 2, 2**(nqubits-n-1)) view below, axis 1 is qubit n.
        dim_to_act = 1
        for n, omega_n in enumerate(self.omegas):
            shape_n = (2**n, 2, 2 ** (self.nqubits - n - 1))
            vec = vec.reshape(shape_n)
            result = result.reshape(shape_n)
            # index_add_ with inds=[1, 0] accumulates omega_n * (σˣ on qubit n):
            # the |0❭ slice of vec is added into the |1❭ slice of result and
            # vice versa, in place.
            result.index_add_(dim_to_act, self.inds, vec, alpha=omega_n)

        return result.reshape(-1)

    def _apply_sigma_operators_complex(self, vec: torch.Tensor) -> torch.Tensor:
        """
        Apply the ∑ⱼΩⱼ/2[cos(ϕⱼ)σˣⱼ + sin(ϕⱼ)σʸⱼ] operator to the input vector |ψ❭.

        Args:
            vec (torch.Tensor): the input state vector.

        Returns:
            the resulting state vector.
        """
        # Complex drive amplitudes (Ωⱼ/2)·e^{iϕⱼ}; the conjugate pair below
        # keeps the operator Hermitian.
        c_omegas = self.omegas * torch.exp(1j * self.phis)
        result = torch.zeros_like(vec)

        dim_to_act = 1
        for n, c_omega_n in enumerate(c_omegas):
            shape_n = (2**n, 2, 2 ** (self.nqubits - n - 1))
            vec = vec.reshape(shape_n)
            result = result.reshape(shape_n)
            # The |0❭ slice of vec feeds the |1❭ slice of result with weight
            # (Ω/2)e^{iϕ} ...
            result.index_add_(
                dim_to_act, self.inds[0], vec[:, 0, :].unsqueeze(1), alpha=c_omega_n
            )
            # ... and the |1❭ slice feeds the |0❭ slice with the conjugate
            # weight (Ω/2)e^{-iϕ}.
            result.index_add_(
                dim_to_act,
                self.inds[1],
                vec[:, 1, :].unsqueeze(1),
                alpha=c_omega_n.conj(),
            )

        return result.reshape(-1)

    def _create_diagonal(self) -> torch.Tensor:
        """
        Return the diagonal elements of the Rydberg Hamiltonian matrix

        H.diag = -∑ⱼΔⱼnⱼ + ∑ᵢ﹥ⱼUᵢⱼnᵢnⱼ
        """
        diag = torch.zeros(2**self.nqubits, dtype=torch.complex128, device=self.device)

        for i in range(self.nqubits):
            # View diag so that axis 1 is qubit i; the in-place ops below write
            # through these views into the flat diag tensor.
            diag = diag.reshape(2**i, 2, -1)
            # Slice where qubit i is |1❭: receives the -Δᵢ detuning term.
            i_fixed = diag[:, 1, :]
            i_fixed -= self.deltas[i]
            for j in range(i + 1, self.nqubits):
                i_fixed = i_fixed.reshape(2**i, 2 ** (j - i - 1), 2, -1)
                # replacing i_j_fixed by i_fixed breaks the code :)
                # (i_fixed must keep naming the qubit-i view: rebinding it to
                # the narrower qubit-j slice would corrupt the next iteration's
                # reshape)
                i_j_fixed = i_fixed[:, :, 1, :]
                i_j_fixed += self.interaction_matrix[i, j]
        return diag.reshape(-1)

    def expect(self, state: StateVector) -> torch.Tensor:
        """Return the energy expectation value E=❬ψ|H|ψ❭ as a 0-dim tensor."""
        assert isinstance(
            state, StateVector
        ), "Currently, only expectation values of StateVectors are supported"
        return torch.vdot(state.vector, self * state.vector)
|
emu_sv/state_vector.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections import Counter
|
|
4
|
+
from typing import Any, Iterable
|
|
5
|
+
import math
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
from emu_base import State, DEVICE_COUNT
|
|
9
|
+
|
|
10
|
+
import torch
|
|
11
|
+
|
|
12
|
+
dtype = torch.complex128
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class StateVector(State):
    """
    Represents a quantum state vector in a computational basis.

    This class extends the `State` class to handle state vectors,
    providing various utilities for initialization, normalization,
    manipulation, and measurement. The state vector must have a length
    that is a power of 2, representing 2ⁿ basis states for n qubits.

    Attributes:
        vector: 1D tensor representation of a state vector.
        gpu: store the vector on GPU if True, otherwise on CPU
    """

    def __init__(
        self,
        vector: torch.Tensor,
        *,
        gpu: bool = True,
    ):
        """
        Wrap the given amplitudes, casting them to complex128 and moving them
        to the GPU when requested and one is available.

        Args:
            vector: the amplitudes; its length must be a power of 2
            gpu: store the vector on GPU if True (and a GPU exists), else CPU
        """
        # NOTE: this accepts also zero vectors.

        assert math.log2(
            len(vector)
        ).is_integer(), "The number of elements in the vector should be power of 2"

        device = "cuda" if gpu and DEVICE_COUNT > 0 else "cpu"
        self.vector = vector.to(dtype=dtype, device=device)

    def _normalize(self) -> None:
        # NOTE: use this in the callbacks
        """Rescale the vector to unit norm in place, unless it already is normalized."""
        norm_state = torch.linalg.vector_norm(self.vector)

        if not torch.allclose(norm_state, torch.tensor(1.0, dtype=torch.float64)):
            self.vector = self.vector / norm_state

    @classmethod
    def zero(cls, num_sites: int, gpu: bool = True) -> StateVector:
        """
        Returns a zero uninitialized "state" vector. Warning, this has no physical meaning as-is!

        Args:
            num_sites: the number of qubits
            gpu: whether gpu or cpu

        Returns:
            The zero state

        Examples:
            >>> StateVector.zero(2)
            tensor([0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], dtype=torch.complex128)
        """

        device = "cuda" if gpu and DEVICE_COUNT > 0 else "cpu"
        vector = torch.zeros(2**num_sites, dtype=dtype, device=device)
        return cls(vector, gpu=gpu)

    @classmethod
    def make(cls, num_sites: int, gpu: bool = True) -> StateVector:
        """
        Returns a State vector in ground state |000..0>.
        The resulting vector is flat, with 2**num_sites amplitudes and the
        amplitude of |00..0> set to one.

        Args:
            num_sites: the number of qubits
            gpu: whether gpu or cpu

        Returns:
            The described state

        Examples:
            >>> StateVector.make(2)
            tensor([1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], dtype=torch.complex128)
        """

        result = cls.zero(num_sites=num_sites, gpu=gpu)
        result.vector[0] = 1.0
        return result

    def inner(self, other: State) -> float | complex:
        """
        Compute <self, other>. The type of other must be StateVector.

        Args:
            other: the other state

        Returns:
            the inner product
        """
        assert isinstance(
            other, StateVector
        ), "Other state also needs to be a StateVector"
        assert (
            self.vector.shape == other.vector.shape
        ), "States do not have the same number of sites"

        # torch.vdot conjugates self, matching the quantum convention ❬self|other❭.
        return torch.vdot(self.vector, other.vector).item()

    def sample(
        self, num_shots: int = 1000, p_false_pos: float = 0.0, p_false_neg: float = 0.0
    ) -> Counter[str]:
        """
        Samples bitstrings, taking into account the specified error rates.

        Args:
            num_shots: how many bitstrings to sample
            p_false_pos: the rate at which a 0 is read as a 1
            p_false_neg: the rate at which a 1 is read as a 0

        Returns:
            the measured bitstrings, by count
        """

        probabilities = torch.abs(self.vector) ** 2
        probabilities /= probabilities.sum()  # multinomial does not normalize the input

        outcomes = torch.multinomial(probabilities, num_shots, replacement=True)

        # Convert outcomes to bitstrings and count occurrences
        counts = Counter([self._index_to_bitstring(outcome) for outcome in outcomes])

        # NOTE: false positives and negatives
        # (p_false_pos / p_false_neg are accepted but not yet applied to the
        # sampled bitstrings — readout errors remain a TODO)
        return counts

    def _index_to_bitstring(self, index: int) -> str:
        """
        Convert an integer index into its corresponding bitstring representation.
        The leftmost character of the result is the most significant bit.
        """
        nqubits = int(math.log2(self.vector.reshape(-1).shape[0]))
        return format(index, f"0{nqubits}b")

    def __add__(self, other: State) -> StateVector:
        """Sum of two state vectors

        Args:
            other: the vector to add to this vector

        Returns:
            The summed state
        """
        assert isinstance(
            other, StateVector
        ), "Other state also needs to be a StateVector"
        result = self.vector + other.vector
        return StateVector(result)

    def __rmul__(self, scalar: complex) -> StateVector:
        """Scalar multiplication

        Args:
            scalar: the scalar to multiply with

        Returns:
            The scaled state
        """
        result = scalar * self.vector

        return StateVector(result)

    def norm(self) -> float | complex:
        """Returns the norm of the state

        Returns:
            the norm of the state
        """
        norm: float | complex = torch.linalg.vector_norm(self.vector).item()
        return norm

    def __repr__(self) -> str:
        return repr(self.vector)

    @staticmethod
    def from_state_string(
        *,
        basis: Iterable[str],
        nqubits: int,
        strings: dict[str, complex],
        **kwargs: Any,
    ) -> StateVector:
        """Transforms a state given by a string into a state vector.

        Construct a state from the pulser abstract representation
        https://pulser.readthedocs.io/en/stable/conventions.html

        Args:
            basis: A tuple containing the basis states (e.g., ('r', 'g')).
            nqubits: the number of qubits.
            strings: A dictionary mapping state strings to complex or floats amplitudes.
                The resulting state is normalized before being returned.

        Returns:
            The resulting state.

        Examples:
            >>> basis = ("r","g")
            >>> n = 2
            >>> st=StateVector.from_state_string(basis=basis,nqubits=n,strings={"rr":1.0,"gg":1.0})
            >>> print(st)
            tensor([0.7071+0.j, 0.0000+0.j, 0.0000+0.j, 0.7071+0.j], dtype=torch.complex128)
        """

        basis = set(basis)
        if basis == {"r", "g"}:
            one = "r"
        elif basis == {"0", "1"}:
            one = "1"
        else:
            raise ValueError("Unsupported basis provided")

        accum_state = StateVector.zero(num_sites=nqubits, **kwargs)

        for state, amplitude in strings.items():
            # Map the bitstring to its basis-state index, e.g. "rr" -> 0b11.
            bin_to_int = int(
                state.replace(one, "1").replace("g", "0"), 2
            )  # "0" basis is already in "0"
            accum_state.vector[bin_to_int] = torch.tensor([amplitude])

        accum_state._normalize()

        return accum_state
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def inner(left: StateVector, right: StateVector) -> torch.Tensor:
    """
    Wrapper around StateVector.inner.

    Args:
        left: StateVector argument
        right: StateVector argument

    Returns:
        the inner product ❬left|right❭, conjugating `left`

    Examples:
        >>> factor = math.sqrt(2.0)
        >>> basis = ("r","g")
        >>> nqubits = 2
        >>> string_state1 = {"gg":1.0,"rr":1.0}
        >>> state1 = StateVector.from_state_string(basis=basis,
        >>> nqubits=nqubits,strings=string_state1)
        >>> string_state2 = {"gr":1.0/factor,"rr":1.0/factor}
        >>> state2 = StateVector.from_state_string(basis=basis,
        >>> nqubits=nqubits,strings=string_state2)
        >>> inner(state1,state2).item()
        (0.4999999999999999+0j)
    """

    assert (left.vector.shape == right.vector.shape) and (
        left.vector.dim() == 1
    ), "Shape of a and b should be the same and both needs to be 1D tensor"
    # fix: use torch.vdot, which conjugates its first argument, so this really
    # matches StateVector.inner; torch.inner omits the conjugation and gives
    # the wrong result for complex amplitudes.
    return torch.vdot(left.vector, right.vector)
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
# When executed as a script, run the doctest examples embedded in the
# docstrings above (StateVector.zero/make/from_state_string and inner).
if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
emu_sv/sv_backend.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
from emu_base.base_classes.backend import Backend, BackendConfig
|
|
2
|
+
from emu_base.base_classes.results import Results
|
|
3
|
+
from emu_sv.sv_config import SVConfig
|
|
4
|
+
from pulser import Sequence
|
|
5
|
+
from emu_base.pulser_adapter import PulserData
|
|
6
|
+
from emu_sv.time_evolution import do_time_step
|
|
7
|
+
from emu_sv import StateVector
|
|
8
|
+
import torch
|
|
9
|
+
from time import time
|
|
10
|
+
from resource import RUSAGE_SELF, getrusage
|
|
11
|
+
from emu_base import DEVICE_COUNT
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class SVBackend(Backend):
    """
    A backend for emulating Pulser sequences using state vectors and sparse matrices.
    """

    def run(self, sequence: Sequence, sv_config: BackendConfig) -> Results:
        """
        Emulates the given sequence.

        Args:
            sequence: a Pulser sequence to simulate
            sv_config: the backends config. Should be of type SVConfig

        Returns:
            the simulation results
        """
        assert isinstance(sv_config, SVConfig)

        self.validate_sequence(sequence)

        results = Results()

        data = PulserData(sequence=sequence, config=sv_config, dt=sv_config.dt)
        omega, delta, phi = data.omega, data.delta, data.phi

        nsteps = omega.shape[0]
        nqubits = omega.shape[1]

        if sv_config.initial_state is not None:
            # fix: wrap the configured state in a fresh StateVector (whose
            # constructor also moves the data to the right device) instead of
            # aliasing it — previously the loop below rebound state.vector on
            # the object stored in sv_config, mutating the caller's state.
            state = StateVector(sv_config.initial_state.vector, gpu=sv_config.gpu)
        else:
            state = StateVector.make(nqubits, gpu=sv_config.gpu)

        dt = sv_config.dt * 1e-3  # ns to µs

        for step in range(nsteps):

            start = time()

            state.vector, H = do_time_step(
                dt,
                omega[step],
                delta[step],
                phi[step],
                data.full_interaction_matrix,
                state.vector,
                sv_config.krylov_tolerance,
            )

            for callback in sv_config.callbacks:
                callback(
                    sv_config,
                    (step + 1) * sv_config.dt,
                    state,
                    H,  # type: ignore[arg-type]
                    results,
                )

            end = time()
            self.log_step_statistics(
                results,
                step=step,
                duration=end - start,
                timestep_count=nsteps,
                state=state,
                sv_config=sv_config,
            )

        return results

    @staticmethod
    def log_step_statistics(
        results: Results,
        *,
        step: int,
        duration: float,
        timestep_count: int,
        state: StateVector,
        sv_config: SVConfig,
    ) -> None:
        """
        Log memory and timing statistics for one step and append them to
        `results.statistics`.

        Args:
            results: the Results object the per-step statistics accumulate on
            step: 0-based index of the step that just finished
            duration: wall-clock duration of the step in seconds
            timestep_count: total number of steps in the simulation
            state: the current state (used to detect CPU vs GPU execution)
            sv_config: provides the logger
        """
        if state.vector.is_cuda:
            # Report the busiest GPU; max_memory_allocated is in bytes -> MB.
            max_mem_per_device = (
                torch.cuda.max_memory_allocated(device) * 1e-6
                for device in range(torch.cuda.device_count())
            )
            max_mem = max(max_mem_per_device)
        else:
            # ru_maxrss is in kB on Linux -> MB (NOTE: bytes on macOS).
            max_mem = getrusage(RUSAGE_SELF).ru_maxrss * 1e-3

        sv_config.logger.info(
            f"step = {step + 1}/{timestep_count}, "
            + f"RSS = {max_mem:.3f} MB, "
            + f"Δt = {duration:.3f} s"
        )

        if results.statistics is None:
            assert step == 0
            results.statistics = {"steps": []}

        assert "steps" in results.statistics
        assert len(results.statistics["steps"]) == step

        results.statistics["steps"].append(
            {
                "RSS": max_mem,
                "duration": duration,
            }
        )
|
emu_sv/sv_config.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from emu_base.base_classes import (
|
|
2
|
+
CorrelationMatrix,
|
|
3
|
+
QubitDensity,
|
|
4
|
+
EnergyVariance,
|
|
5
|
+
SecondMomentOfEnergy,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
import copy
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
from emu_base import BackendConfig
|
|
12
|
+
from emu_sv import StateVector
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
from emu_sv.custom_callback_implementations import (
|
|
16
|
+
qubit_density_sv_impl,
|
|
17
|
+
energy_variance_sv_impl,
|
|
18
|
+
second_moment_sv_impl,
|
|
19
|
+
correlation_matrix_sv_impl,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
from types import MethodType
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class SVConfig(BackendConfig):
    """
    The configuration of the emu-sv SVBackend. The kwargs passed to this class
    are passed on to the base class.
    See the API for that class for a list of available options.

    Args:
        initial_state: the initial state to use in the simulation
        dt: the timestep size that the solver uses. Note that observables are
            only calculated if the evaluation_times are divisible by dt.
        max_krylov_dim:
            the size of the krylov subspace that the Lanczos algorithm maximally builds
        krylov_tolerance:
            the Lanczos algorithm uses this as the convergence tolerance
        gpu: Use 1 gpu if True, and a GPU is available, otherwise, cpu.
            Will cause errors if True when a gpu is not available
        kwargs: arguments that are passed to the base class

    Examples:
        >>> gpu = True
        >>> dt = 1 #this will impact the runtime
        >>> krylov_tolerance = 1e-8 #the simulation will be faster, but less accurate
        >>> SVConfig(gpu=gpu, dt=dt, krylov_tolerance=krylov_tolerance,
        ...     with_modulation=True) #the last arg is taken from the base class
    """

    def __init__(
        self,
        *,
        initial_state: StateVector | None = None,
        dt: int = 10,
        max_krylov_dim: int = 100,
        krylov_tolerance: float = 1e-10,
        gpu: bool = True,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)

        self.initial_state = initial_state
        self.dt = dt
        self.max_krylov_dim = max_krylov_dim
        self.gpu = gpu
        self.krylov_tolerance = krylov_tolerance

        # Swap in state-vector specific implementations of the supported
        # observables. The user's callback objects are left untouched: each
        # matching callback is deep-copied and the copy is patched.
        custom_impls = {
            QubitDensity: qubit_density_sv_impl,
            EnergyVariance: energy_variance_sv_impl,
            SecondMomentOfEnergy: second_moment_sv_impl,
            CorrelationMatrix: correlation_matrix_sv_impl,
        }
        for num, obs in enumerate(self.callbacks):  # monkey patch
            for obs_type, impl in custom_impls.items():
                if isinstance(obs, obs_type):
                    obs_copy = copy.deepcopy(obs)
                    # Bind the patched `apply` to the COPY that is stored in
                    # self.callbacks. (Previously it was bound to the original
                    # `obs`, so the patched method operated on the wrong
                    # object.)
                    obs_copy.apply = MethodType(  # type: ignore[method-assign]
                        impl, obs_copy
                    )
                    self.callbacks[num] = obs_copy
                    break
emu_sv/time_evolution.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
from emu_base.math.krylov_exp import krylov_exp
|
|
4
|
+
from emu_sv.hamiltonian import RydbergHamiltonian
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def do_time_step(
    dt: float,
    omegas: torch.Tensor,
    deltas: torch.Tensor,
    phis: torch.Tensor,
    full_interaction_matrix: torch.Tensor,
    state_vector: torch.Tensor,
    krylov_tolerance: float,
) -> tuple[torch.Tensor, RydbergHamiltonian]:
    """Evolve `state_vector` over one time step of length `dt`.

    Builds the Rydberg Hamiltonian H for the given pulse values and computes
    exp(-i*dt*H) applied to the state with a Krylov (Lanczos) method.

    Returns:
        the evolved state vector together with the Hamiltonian that was used
        (the latter is reused by the observable callbacks).
    """
    hamiltonian = RydbergHamiltonian(
        omegas=omegas,
        deltas=deltas,
        phis=phis,
        interaction_matrix=full_interaction_matrix,
        device=state_vector.device,
    )

    def scaled_action(x: torch.Tensor) -> torch.Tensor:
        # Action of the exponent's generator, -i*dt*H, on a vector.
        return -1j * dt * (hamiltonian * x)

    evolved_state = krylov_exp(
        scaled_action,
        state_vector,
        norm_tolerance=krylov_tolerance,
        exp_tolerance=krylov_tolerance,
    )
    return evolved_state, hamiltonian
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: emu-sv
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Pasqal State Vector based pulse emulator built on PyTorch
|
|
5
|
+
Project-URL: Documentation, https://pasqal-io.github.io/emulators/
|
|
6
|
+
Project-URL: Repository, https://github.com/pasqal-io/emulators
|
|
7
|
+
Project-URL: Issues, https://github.com/pasqal-io/emulators/issues
|
|
8
|
+
Author-email: Anton Quelle <anton.quelle@pasqal.com>, Mauro Mendizabal <mauro.mendizabal-pico@pasqal.com>, Stefano Grava <stefano.grava@pasqal.com>, Pablo Le Henaff <pablo.le-henaff@pasqal.com>
|
|
9
|
+
License: PASQAL OPEN-SOURCE SOFTWARE LICENSE AGREEMENT (MIT-derived)
|
|
10
|
+
|
|
11
|
+
The author of the License is:
|
|
12
|
+
Pasqal, a Société par Actions Simplifiée (Simplified Joint Stock Company) registered under number 849 441 522 at the Registre du commerce et des sociétés (Trade and Companies Register) of Evry – France, headquartered at 7 rue Leonard de Vinci – 91300 – Massy – France, duly represented by its Président, M. Georges-Olivier REYMOND,
|
|
13
|
+
|
|
14
|
+
Hereafter referred to as « the Licensor »
|
|
15
|
+
|
|
16
|
+
- Permission is hereby granted, free of charge, to any person obtaining a copy of this software (the “Licensee”) and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. The Software is “as is”, without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and non-infringement. In no event shall the authors or copyright holders be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise arising from, out of or in connection with the Software or the use or other dealings in the Software.
|
|
18
|
+
|
|
19
|
+
- If use of the Software leads to the necessary use of any patent of the Licensor and/or any of its Affiliates (defined as a company owned or controlled by the Licensor), the Licensee is granted a royalty-free license, in any country where such patent is in force, to use the object of such patent; or use the process covered by such patent,
|
|
20
|
+
|
|
21
|
+
- Such a patent license is granted for internal research or academic use of the Licensee's, which includes use by employees and students of the Licensee, acting on behalf of the Licensee, for research purposes only.
|
|
22
|
+
|
|
23
|
+
- The License is governed by the laws of France. Any dispute relating to the License, notably its execution, performance and/or termination shall be brought to, heard and tried by the Tribunal Judiciaire de Paris, regardless of the rules of jurisdiction in the matter.
|
|
24
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
25
|
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
|
26
|
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
|
27
|
+
Requires-Python: >=3.10
|
|
28
|
+
Requires-Dist: emu-base==1.2.6
|
|
29
|
+
Description-Content-Type: text/markdown
|
|
30
|
+
|
|
31
|
+
<div align="center">
|
|
32
|
+
<img src="docs/logos/LogoTaglineSoftGreen.svg">
|
|
33
|
+
</div>
|
|
34
|
+
|
|
35
|
+
# Welcome to the Pasqal analog emulators
|
|
36
|
+
|
|
37
|
+
Welcome, and please see the [GitHub pages](https://pasqal-io.github.io/emulators/) for a landing page to this repo.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
emu_sv/__init__.py,sha256=Ji7PePRQka8XrVMw3N3i2k_sBwMQIj6LBOZFGZ0HNvs,845
|
|
2
|
+
emu_sv/custom_callback_implementations.py,sha256=H7fy0trUtY00wid6VeRJ2inUazFbkMlgavTfm8QtGq8,2754
|
|
3
|
+
emu_sv/dense_operator.py,sha256=69rv1J5jsHSRoPsgZqKJZnttCgMLIk4tDCBsOaOBVR8,7034
|
|
4
|
+
emu_sv/hamiltonian.py,sha256=uzR7XHsv0QXBbcNuk93-phrO-On-WfJCINWz7Zofc90,6270
|
|
5
|
+
emu_sv/state_vector.py,sha256=XsbHDNu6LDBop_qVVUoaw8blVPOystP8MOQ9demME6g,8125
|
|
6
|
+
emu_sv/sv_backend.py,sha256=cghNpJq8ALdcoHCnx0xHcZBM9x8URLRFo0_sZ5CXXvY,3576
|
|
7
|
+
emu_sv/sv_config.py,sha256=NUAxqYG0NTWbarVreFr1Tb-8FzB0LVQ0fYnb2QSCzxo,3161
|
|
8
|
+
emu_sv/time_evolution.py,sha256=48C0DL_SOu7Jdjk2QKBNPsevOpQlgsPYUHE7cScY-ZM,796
|
|
9
|
+
emu_sv-1.0.0.dist-info/METADATA,sha256=yqdJOFSlS5RqKiBC-Tx_LSL2n3hFnkt1unkv3UMNJG8,3513
|
|
10
|
+
emu_sv-1.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
11
|
+
emu_sv-1.0.0.dist-info/RECORD,,
|