emu-sv 2.1.1__py3-none-any.whl → 2.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_sv/__init__.py +3 -1
- emu_sv/custom_callback_implementations.py +97 -23
- emu_sv/density_matrix_state.py +2 -1
- emu_sv/lindblad_operator.py +69 -40
- emu_sv/state_vector.py +3 -3
- emu_sv/sv_backend.py +7 -123
- emu_sv/sv_backend_impl.py +241 -0
- emu_sv/sv_config.py +37 -7
- emu_sv/time_evolution.py +119 -36
- {emu_sv-2.1.1.dist-info → emu_sv-2.2.1.dist-info}/METADATA +3 -3
- emu_sv-2.2.1.dist-info/RECORD +15 -0
- emu_sv-2.1.1.dist-info/RECORD +0 -14
- {emu_sv-2.1.1.dist-info → emu_sv-2.2.1.dist-info}/WHEEL +0 -0
emu_sv/__init__.py
CHANGED
@@ -15,6 +15,7 @@ from pulser.backend import (
 from .dense_operator import DenseOperator
 from .sv_backend import SVBackend, SVConfig
 from .state_vector import StateVector, inner
+from .density_matrix_state import DensityMatrix


 __all__ = [
@@ -34,6 +35,7 @@ __all__ = [
     "StateResult",
     "StateVector",
     "inner",
+    "DensityMatrix",
 ]

-__version__ = "2.1.1"
+__version__ = "2.2.1"
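With this change DensityMatrix joins the package's public exports next to the existing state-vector types. A minimal import check (a sketch, assuming emu-sv 2.2.1 is installed):

    # assumes `pip install emu-sv==2.2.1`
    import emu_sv
    from emu_sv import DensityMatrix, StateVector, SVBackend, SVConfig

    print(emu_sv.__version__)  # expected: 2.2.1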
emu_sv/custom_callback_implementations.py
CHANGED

@@ -6,12 +6,14 @@ from pulser.backend import (
     EnergySecondMoment,
     EnergyVariance,
     Occupation,
-    Energy,
 )
-
+from emu_sv.density_matrix_state import DensityMatrix
 from emu_sv.state_vector import StateVector
 from emu_sv.dense_operator import DenseOperator
 from emu_sv.hamiltonian import RydbergHamiltonian
+from emu_sv.lindblad_operator import RydbergLindbladian
+
+dtype = torch.float64


 def qubit_occupation_sv_impl(
@@ -25,7 +27,7 @@ def qubit_occupation_sv_impl(
     Custom implementation of the occupation ❬ψ|nᵢ|ψ❭ for the state vector solver.
     """
     nqubits = state.n_qudits
-    occupation = torch.zeros(nqubits, dtype=
+    occupation = torch.zeros(nqubits, dtype=dtype, device=state.vector.device)
     for i in range(nqubits):
         state_tensor = state.vector.view(2**i, 2, -1)
         # nᵢ is a projector and therefore nᵢ == nᵢnᵢ
@@ -34,6 +36,33 @@ def qubit_occupation_sv_impl(
     return occupation.cpu()


+def qubit_occupation_sv_den_mat_impl(
+    self: Occupation,
+    *,
+    config: EmulationConfig,
+    state: DensityMatrix,
+    hamiltonian: DenseOperator,
+) -> torch.Tensor:
+    """
+    Custom implementation of the occupation nᵢ observable for density matrix.
+    The observable nᵢ is given by: I ⊗ ... ⊗ nᵢ ⊗ ... ⊗ I
+    where nᵢ is the occupation operator for qubit i.
+    The expectation value is given by: <nᵢ> = Tr(ρ nᵢ).
+
+    The output will be a tensor of size (nqubits,), where each element will be the
+    expectation value of the occupation operator for each qubit.
+    In case of 3 atoms, the output will be a tensor of size (3,), where each element
+    will be <nᵢ> = Tr(ρnᵢ), or [ <n₁>, <n₂>, <n₃> ].
+    """
+    nqubits = state.n_qudits
+    occupation = torch.zeros(nqubits, dtype=dtype, device=state.matrix.device)
+    diag_state_tensor = state.matrix.diagonal()
+    for i in range(nqubits):
+        state_tensor = diag_state_tensor.view(2**i, 2, 2 ** (nqubits - i - 1))[:, 1, :]
+        occupation[i] = state_tensor.sum().real
+    return occupation.cpu()
+
+
 def correlation_matrix_sv_impl(
     self: CorrelationMatrix,
     *,
@@ -47,27 +76,47 @@ def correlation_matrix_sv_impl(
     TODO: extend to arbitrary two-point correlation ❬ψ|AᵢBⱼ|ψ❭
     """
     nqubits = state.n_qudits
-    correlation = torch.zeros(
-        nqubits, nqubits, dtype=torch.float64, device=state.vector.device
-    )
+    correlation = torch.zeros(nqubits, nqubits, dtype=dtype, device=state.vector.device)

     for i in range(nqubits):
         select_i = state.vector.view(2**i, 2, -1)
         select_i = select_i[:, 1]
-
-
-
-
-
-
-        select_ij = select_i[:, :, 1, :]
-        value = torch.linalg.vector_norm(select_ij) ** 2
-        correlation[i, j] = value
-        correlation[j, i] = value
+        correlation[i, i] = torch.linalg.vector_norm(select_i) ** 2
+        for j in range(i + 1, nqubits):  # select the upper triangle
+            select_i = select_i.view(2**i, 2 ** (j - i - 1), 2, -1)
+            select_ij = select_i[:, :, 1, :]
+            correlation[i, j] = torch.linalg.vector_norm(select_ij) ** 2
+            correlation[j, i] = correlation[i, j]

     return correlation.cpu()


+def correlation_matrix_sv_den_mat_impl(
+    self: CorrelationMatrix,
+    *,
+    config: EmulationConfig,
+    state: DensityMatrix,
+    hamiltonian: DenseOperator,
+) -> torch.Tensor:
+    """
+    Custom implementation of the density-density correlation <nᵢnⱼ> = Tr(ρ nᵢnⱼ)
+    in the case of Lindblad noise
+    """
+    nqubits = state.n_qudits
+    correlation = torch.zeros(nqubits, nqubits, dtype=dtype)
+    state_diag_matrix = state.matrix.diagonal()
+    for i in range(nqubits):  # applying ni
+        shapei = (2**i, 2, 2 ** (nqubits - i - 1))
+        state_diag_ni = state_diag_matrix.view(*shapei)[:, 1, :]
+        correlation[i, i] = state_diag_ni.sum().real  # diagonal
+        for j in range(i + 1, nqubits):
+            shapeij = (2**i, 2 ** (j - i - 1), 2, 2 ** (nqubits - 1 - j))
+            state_diag_ni_nj = state_diag_ni.view(*shapeij)[:, :, 1, :]
+            correlation[i, j] = state_diag_ni_nj.sum().real
+            correlation[j, i] = correlation[i, j]
+    return correlation.cpu()
+
+
 def energy_variance_sv_impl(
     self: EnergyVariance,
     *,
@@ -85,6 +134,26 @@ def energy_variance_sv_impl(
     return en_var.cpu()


+def energy_variance_sv_den_mat_impl(
+    self: EnergyVariance,
+    *,
+    config: EmulationConfig,
+    state: DensityMatrix,
+    hamiltonian: RydbergLindbladian,
+) -> torch.Tensor:
+    """
+    Custom implementation of the energy variance tr(ρH²)-tr(ρH)² for the
+    lindblad equation solver.
+    """
+    h_dense_matrix = hamiltonian.h_eff(state.matrix)  # Hρ
+    gpu = state.matrix.is_cuda
+    h_squared_dense_mat = hamiltonian.expect(
+        DensityMatrix(h_dense_matrix, gpu=gpu)
+    )  # tr(ρH²)
+    en_var: torch.Tensor = h_squared_dense_mat - hamiltonian.expect(state) ** 2  # tr(ρH)²
+    return en_var.cpu()
+
+
 def energy_second_moment_sv_impl(
     self: EnergySecondMoment,
     *,
@@ -101,15 +170,20 @@ def energy_second_moment_sv_impl(
     return en_2_mom.cpu()


-def
-    self:
+def energy_second_moment_den_mat_impl(
+    self: EnergyVariance,
     *,
     config: EmulationConfig,
-    state:
-    hamiltonian:
+    state: DensityMatrix,
+    hamiltonian: RydbergLindbladian,
 ) -> torch.Tensor:
     """
-    Custom implementation of the energy
+    Custom implementation of the second moment of energy tr(ρH²) for the
+    lindblad equation solver.
     """
-
-
+    h_dense_matrix = hamiltonian.h_eff(state.matrix)  # Hρ
+    gpu = state.matrix.is_cuda
+
+    return hamiltonian.expect(
+        DensityMatrix(h_dense_matrix, gpu=gpu)
+    ).cpu()  # tr(ρH²) = tr(ρ H H)
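The density-matrix callbacks above only ever read the diagonal of ρ: nᵢ is diagonal in the computational basis, so Tr(ρ nᵢ) is the sum of the diagonal entries in which qubit i is in |1⟩. A small self-contained check of that identity, using the same reshaping trick but not the package code:

    import torch

    nqubits = 2
    dim = 2**nqubits
    # Random valid density matrix: ρ = A A† / tr(A A†)
    a = torch.randn(dim, dim, dtype=torch.complex128)
    rho = a @ a.conj().T
    rho = rho / rho.trace()

    # Occupation operator n = |1><1| on qubit i, embedded with identities.
    n = torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128)
    eye = torch.eye(2, dtype=torch.complex128)

    for i in range(nqubits):
        ops = [eye] * nqubits
        ops[i] = n
        full = ops[0]
        for op in ops[1:]:
            full = torch.kron(full, op)
        direct = (rho @ full).diagonal().sum().real  # Tr(ρ nᵢ)

        # Same value from the diagonal-only reshaping used in the callbacks above.
        diag = rho.diagonal()
        via_diag = diag.view(2**i, 2, 2 ** (nqubits - i - 1))[:, 1, :].sum().real
        assert torch.allclose(direct, via_diag)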
emu_sv/density_matrix_state.py
CHANGED
@@ -111,6 +111,7 @@ class DensityMatrix(State[complex, torch.Tensor]):
         cls: Type[DensityMatrixType],
         *,
         eigenstates: Sequence[Eigenstate],
+        n_qudits: int,
         amplitudes: Mapping[str, complex],
     ) -> tuple[DensityMatrix, Mapping[str, complex]]:
         """Transforms a state given by a string into a density matrix.
@@ -140,7 +141,7 @@ class DensityMatrix(State[complex, torch.Tensor]):
         """

         state_vector, amplitudes = StateVector._from_state_amplitudes(
-            eigenstates=eigenstates, amplitudes=amplitudes
+            eigenstates=eigenstates, n_qudits=n_qudits, amplitudes=amplitudes
         )

         return DensityMatrix.from_state_vector(state_vector), amplitudes
emu_sv/lindblad_operator.py
CHANGED
@@ -1,6 +1,6 @@
 import torch
-from emu_base
-
+from emu_base import compute_noise_from_lindbladians, matmul_2x2_with_batched
+from emu_sv.density_matrix_state import DensityMatrix

 dtype = torch.complex128
 sigmax = torch.tensor([[0.0, 1.0], [1.0, 0.0]], dtype=dtype)
@@ -17,6 +17,10 @@ class RydbergLindbladian:
     where A_k is a jump operator and H is the Rydberg Hamiltonian.
     The complex -𝑖, will be multiplied in the evolution.

+    Only works with effective noise channels, i.e., the jump or collapse
+    operators. For more information, see:
+    https://pulser.readthedocs.io/en/stable/tutorials/effective_noise.html
+
     Attributes:
         nqubits (int): number of qubits in the system.
         omegas (torch.Tensor): amplited frequencies Ωⱼ for each qubit, divided by 2.
@@ -96,7 +100,10 @@ class RydbergLindbladian:

         orignal_shape = density_matrix.shape
         density_matrix = density_matrix.view(2**target_qubit, 2, -1)
-
+        if density_matrix.is_cpu:
+            density_matrix = local_op @ density_matrix
+        else:
+            density_matrix = matmul_2x2_with_batched(local_op, density_matrix)

         return density_matrix.view(orignal_shape)

@@ -114,12 +121,60 @@ class RydbergLindbladian:
         """

         orignal_shape = density_matrix.shape
-
         density_matrix = density_matrix.view(2 ** (target_qubit + self.nqubits), 2, -1)
-        density_matrix
+        if density_matrix.is_cpu:
+            density_matrix = local_op.conj() @ density_matrix
+        else:
+            density_matrix = matmul_2x2_with_batched(local_op.conj(), density_matrix)

         return density_matrix.view(orignal_shape)

+    def h_eff(
+        self,
+        density_matrix: torch.Tensor,
+        lindblad_ops: torch.Tensor = torch.zeros(2, 2, dtype=dtype),
+    ) -> torch.Tensor:
+        """Compute the effective Hamiltonian, Heff = Hρ -0.5i ∑ₖ Lₖ† Lₖ ρ, applied
+        to a density matrix ρ.
+        - libdlad_ops by default are 2x2 zero matrix"""
+        H_den_matrix = torch.zeros_like(density_matrix, dtype=dtype, device=self.device)
+
+        for qubit in range(len(self.omegas)):
+            H_q = self._local_terms_hamiltonian(qubit, lindblad_ops.to(self.device))
+            H_den_matrix += self.apply_local_op_to_density_matrix(
+                density_matrix, H_q, qubit
+            )
+
+        H_den_matrix += self._apply_interaction_terms(density_matrix)
+        return H_den_matrix
+
+    def _local_terms_hamiltonian(
+        self, qubit: int, lindblad_ops: torch.Tensor
+    ) -> torch.Tensor:
+        """Construct the Hamiltonian for single qubit terms:
+        ∑ᵢ 𝛺ᵢ /2 𝜎ₓ^ i − 𝛿ⁱ nᵢ + jump operators terms , including the sum
+        of Lindblad terms, when 𝜙ᵢ equals to 0.0.
+        When 𝜙ᵢ not equals to 0.0:
+        ∑ᵢ 𝛺ᵢ /2 (cos (𝜙ᵢ) 𝜎ₓ^ i + sin(𝜙ᵢ)𝜎_yⁱ) − 𝛿ⁱ nᵢ + jump operators terms
+        """
+        omega = self.omegas[qubit]
+        delta = self.deltas[qubit]
+
+        sigma_x = sigmax.to(device=self.device)
+        n = n_op.to(device=self.device)
+
+        if not self.complex:
+            return omega * sigma_x - delta * n + lindblad_ops.to(self.device)
+
+        phi = self.phis[qubit]
+        sigma_y = sigmay.to(device=self.device)
+        complex_part = torch.cos(phi) * sigma_x + torch.sin(phi) * sigma_y
+        return omega * complex_part - delta * n + lindblad_ops.to(self.device)
+
+    def _apply_interaction_terms(self, density_matrix: torch.Tensor) -> torch.Tensor:
+        """Apply the interaction terms ∑ᵢⱼ Uᵢⱼ nᵢ nⱼ to the density matrix."""
+        return self.diag.view(-1, 1) * density_matrix
+
     def __matmul__(self, density_matrix: torch.Tensor) -> torch.Tensor:
         """Apply the i*RydbergLindbladian operator to the density matrix ρ
         in the following way:
@@ -133,41 +188,8 @@ class RydbergLindbladian:
         sum_lindblad_local = compute_noise_from_lindbladians(self.pulser_linblads).to(
             self.device
         )
-
-
-        H_den_matrix = torch.zeros_like(density_matrix, dtype=dtype, device=self.device)
-
-        if not self.complex:
-            for qubit, (omega, delta) in enumerate(zip(self.omegas, self.deltas)):
-                H_q = (
-                    omega * sigmax.to(device=self.device)
-                    - delta * n_op.to(device=self.device)
-                    + sum_lindblad_local
-                )
-                H_den_matrix += self.apply_local_op_to_density_matrix(
-                    density_matrix, H_q, qubit
-                )
-        else:
-            for qubit, (omega, delta, phi) in enumerate(
-                zip(self.omegas, self.deltas, self.phis)
-            ):
-                H_q = (
-                    omega
-                    * (
-                        (
-                            torch.cos(phi) * sigmax.to(device=self.device)
-                            + torch.sin(phi) * sigmay.to(device=self.device)
-                        )
-                    )
-                    - delta * n_op.to(device=self.device)
-                    + sum_lindblad_local
-                )
-                H_den_matrix += self.apply_local_op_to_density_matrix(
-                    density_matrix, H_q, qubit
-                )
-
-        # apply the interaction terms ∑ᵢⱼ Uᵢⱼ nᵢ nⱼ
-        H_den_matrix += self.diag.view(-1, 1) * density_matrix
+        # Heff = Hρ -0.5i ∑ₖ Lₖ† Lₖ ρ
+        H_den_matrix = self.h_eff(density_matrix, sum_lindblad_local)

         # Heff - Heff^†= [H, ρ] - 0.5i ∑ₖ Lₖ† Lₖρ - ρ 0.5i ∑ₖ Lₖ† Lₖρ
         H_den_matrix = H_den_matrix - H_den_matrix.conj().T
@@ -186,3 +208,10 @@ class RydbergLindbladian:
         )

         return H_den_matrix + 1.0j * L_den_matrix_Ldag
+
+    def expect(self, state: DensityMatrix) -> torch.Tensor:
+        """Return the energy expectation value E=tr(H𝜌)"""
+        en = (self.h_eff(state.matrix)).trace()
+
+        assert torch.allclose(en.imag, torch.zeros_like(en.imag), atol=1e-8)
+        return en.real
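h_eff supplies the non-Hermitian half of the standard Lindblad generator, dρ/dt = -i[H, ρ] + ∑ₖ (Lₖ ρ Lₖ† - ½{Lₖ†Lₖ, ρ}). A self-contained single-qubit sketch of that generator (independent of the emu_base helpers and of the package's internal factor-of-i convention), checking that it preserves the trace of ρ:

    import torch

    dtype = torch.complex128
    sx = torch.tensor([[0, 1], [1, 0]], dtype=dtype)
    n = torch.tensor([[0, 0], [0, 1]], dtype=dtype)

    omega, delta, gamma = 1.0, 0.5, 0.1
    H = 0.5 * omega * sx - delta * n
    L = gamma**0.5 * torch.tensor([[0, 1], [0, 0]], dtype=dtype)  # decay |1> -> |0>

    def lindblad_rhs(rho: torch.Tensor) -> torch.Tensor:
        """dρ/dt = -i[H, ρ] + L ρ L† - ½ {L†L, ρ}."""
        comm = H @ rho - rho @ H
        anti = L.conj().T @ L @ rho + rho @ L.conj().T @ L
        return -1j * comm + L @ rho @ L.conj().T - 0.5 * anti

    rho = torch.tensor([[0.25, 0.1], [0.1, 0.75]], dtype=dtype)  # Hermitian, trace 1
    drho = lindblad_rhs(rho)
    assert torch.allclose(drho.trace(), torch.zeros((), dtype=dtype), atol=1e-12)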
emu_sv/state_vector.py
CHANGED
@@ -51,7 +51,7 @@ class StateVector(State[complex, torch.Tensor]):
     @property
     def n_qudits(self) -> int:
         """The number of qudits in the state."""
-        nqudits = math.log2(self.vector.
+        nqudits = math.log2(self.vector.view(-1).shape[0])
         return int(nqudits)

     def _normalize(self) -> None:
@@ -220,6 +220,7 @@ class StateVector(State[complex, torch.Tensor]):
         cls: Type[StateVectorType],
         *,
         eigenstates: Sequence[Eigenstate],
+        n_qudits: int,
         amplitudes: Mapping[str, complex],
     ) -> tuple[StateVector, Mapping[str, complex]]:
         """Transforms a state given by a string into a state vector.
@@ -257,8 +258,7 @@ class StateVector(State[complex, torch.Tensor]):
         else:
             raise ValueError("Unsupported basis provided")

-
-        accum_state = StateVector.zero(num_sites=nqubits, eigenstates=eigenstates)
+        accum_state = StateVector.zero(num_sites=n_qudits, eigenstates=eigenstates)

         for state, amplitude in amplitudes.items():
             bin_to_int = int(
emu_sv/sv_backend.py
CHANGED
@@ -1,23 +1,14 @@
-import
-from
-import time
-import typing
-
-from pulser.backend import EmulatorBackend, Results, Observable, State, EmulationConfig
-
-from emu_base import PulserData
-
-from emu_sv.state_vector import StateVector
+from pulser.backend import EmulatorBackend
+from pulser.backend import Results
 from emu_sv.sv_config import SVConfig
-from emu_sv.
-
-
-_TIME_CONVERSION_COEFF = 0.001  # Omega and delta are given in rad/μs, dt in ns
+from emu_sv.sv_backend_impl import create_impl


 class SVBackend(EmulatorBackend):
     """
     A backend for emulating Pulser sequences using state vectors and sparse matrices.
+    Noisy simulation is supported by solving the Lindblad equation and using effective
+    noise channel or jump operators
     """

     default_config = SVConfig()
@@ -31,112 +22,5 @@ class SVBackend(EmulatorBackend):
         """
         assert isinstance(self._config, SVConfig)

-
-
-        )
-        self.target_times = pulser_data.target_times
-        self.time = time.time()
-        omega, delta, phi = pulser_data.omega, pulser_data.delta, pulser_data.phi
-
-        nsteps = omega.shape[0]
-        nqubits = omega.shape[1]
-
-        self.results = Results(atom_order=(), total_duration=self.target_times[-1])
-        self.statistics = Statistics(
-            evaluation_times=[t / self.target_times[-1] for t in self.target_times],
-            data=[],
-            timestep_count=nsteps,
-        )
-
-        if self._config.initial_state is not None:
-            state = self._config.initial_state
-            state = StateVector(state.vector.clone(), gpu=state.vector.is_cuda)
-        else:
-            state = StateVector.make(nqubits, gpu=self._config.gpu)
-
-        stepper = EvolveStateVector.apply
-        for step in range(nsteps):
-            dt = self.target_times[step + 1] - self.target_times[step]
-            state.vector, H = stepper(
-                dt * _TIME_CONVERSION_COEFF,
-                omega[step],
-                delta[step],
-                phi[step],
-                pulser_data.full_interaction_matrix,
-                state.vector,
-                self._config.krylov_tolerance,
-            )
-
-            # callbacks in observables and self.statistics in H
-            # have "# type: ignore[arg-type]" because H has it's own type
-            # meaning H is not inherited from Operator class.
-            # We decided that ignore[arg-type] is better compared to
-            # having many unused NotImplemented methods
-            for callback in self._config.observables:
-                callback(
-                    self._config,
-                    self.target_times[step + 1] / self.target_times[-1],
-                    state,
-                    H,  # type: ignore[arg-type]
-                    self.results,
-                )
-
-            self.statistics.data.append(time.time() - self.time)
-            self.statistics(
-                self._config,
-                self.target_times[step + 1] / self.target_times[-1],
-                state,
-                H,  # type: ignore[arg-type]
-                self.results,
-            )
-            self.time = time.time()
-            del H
-
-        return self.results
-
-
-class Statistics(Observable):
-    def __init__(
-        self,
-        evaluation_times: typing.Sequence[float] | None,
-        data: list[float],
-        timestep_count: int,
-    ):
-        super().__init__(evaluation_times=evaluation_times)
-        self.data = data
-        self.timestep_count = timestep_count
-
-    @property
-    def _base_tag(self) -> str:
-        return "statistics"
-
-    def apply(
-        self,
-        *,
-        config: EmulationConfig,
-        state: State,
-        **kwargs: typing.Any,
-    ) -> dict:
-        """Calculates the observable to store in the Results."""
-        assert isinstance(state, StateVector)
-        assert isinstance(config, SVConfig)
-        duration = self.data[-1]
-        if state.vector.is_cuda:
-            max_mem_per_device = (
-                torch.cuda.max_memory_allocated(device) * 1e-6
-                for device in range(torch.cuda.device_count())
-            )
-            max_mem = max(max_mem_per_device)
-        else:
-            max_mem = getrusage(RUSAGE_SELF).ru_maxrss * 1e-3
-
-        config.logger.info(
-            f"step = {len(self.data)}/{self.timestep_count}, "
-            + f"RSS = {max_mem:.3f} MB, "
-            + f"Δt = {duration:.3f} s"
-        )
-
-        return {
-            "RSS": max_mem,
-            "duration": duration,
-        }
+        impl = create_impl(self._sequence, self._config)
+        return impl._run()
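run() is now a thin wrapper: it builds the implementation from the sequence and config and delegates the whole stepping loop to it. Typical usage is unchanged; a rough sketch, where `seq` stands for an already-built pulser Sequence and the keyword names are taken from attributes referenced in this diff (dt, krylov_tolerance), not from a checked SVConfig signature:

    from emu_sv import SVBackend, SVConfig

    config = SVConfig(dt=10, krylov_tolerance=1e-8)  # assumed constructor kwargs
    backend = SVBackend(seq, config=config)  # `seq`: a pulser Sequence built elsewhere
    results = backend.run()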
emu_sv/sv_backend_impl.py
ADDED

@@ -0,0 +1,241 @@
+from abc import abstractmethod
+import time
+import typing
+
+from emu_sv.hamiltonian import RydbergHamiltonian
+from emu_sv.lindblad_operator import RydbergLindbladian
+from pulser import Sequence
+import torch
+from resource import RUSAGE_SELF, getrusage
+
+from pulser.backend import Results, Observable, State, EmulationConfig
+from emu_base import PulserData
+
+from emu_sv.state_vector import StateVector
+from emu_sv.density_matrix_state import DensityMatrix
+from emu_sv.sv_config import SVConfig
+from emu_sv.time_evolution import EvolveStateVector, EvolveDensityMatrix
+
+_TIME_CONVERSION_COEFF = 0.001  # Omega and delta are given in rad/μs, dt in ns
+
+
+class Statistics(Observable):
+    def __init__(
+        self,
+        evaluation_times: typing.Sequence[float] | None,
+        data: list[float],
+        timestep_count: int,
+    ):
+        super().__init__(evaluation_times=evaluation_times)
+        self.data = data
+        self.timestep_count = timestep_count
+
+    @property
+    def _base_tag(self) -> str:
+        return "statistics"
+
+    def apply(
+        self,
+        *,
+        config: EmulationConfig,
+        state: State,
+        **kwargs: typing.Any,
+    ) -> dict:
+        """Calculates the observable to store in the Results."""
+        assert isinstance(state, StateVector | DensityMatrix)
+        assert isinstance(config, SVConfig)
+        duration = self.data[-1]
+        if isinstance(state, StateVector) and state.vector.is_cuda:
+            max_mem_per_device = (
+                torch.cuda.max_memory_allocated(device) * 1e-6
+                for device in range(torch.cuda.device_count())
+            )
+            max_mem = max(max_mem_per_device)
+        elif isinstance(state, DensityMatrix) and state.matrix.is_cuda:
+            max_mem_per_device = (
+                torch.cuda.max_memory_allocated(device) * 1e-6
+                for device in range(torch.cuda.device_count())
+            )
+            max_mem = max(max_mem_per_device)
+        else:
+            max_mem = getrusage(RUSAGE_SELF).ru_maxrss * 1e-3
+
+        config.logger.info(
+            f"step = {len(self.data)}/{self.timestep_count}, "
+            + f"RSS = {max_mem:.3f} MB, "
+            + f"Δt = {duration:.3f} s"
+        )
+
+        return {
+            "RSS": max_mem,
+            "duration": duration,
+        }
+
+
+class BaseSVBackendImpl:
+    """
+    This class is used to handle the state vector and density matrix evolution.
+    """
+
+    def __init__(self, config: SVConfig, pulser_data: PulserData):
+        self._config = config
+        self._pulser_data = pulser_data
+        self.target_times = pulser_data.target_times
+        self.omega = pulser_data.omega
+        self.delta = pulser_data.delta
+        self.phi = pulser_data.phi
+        self.nsteps = pulser_data.omega.shape[0]
+        self.nqubits = pulser_data.omega.shape[1]
+        self.state: State
+        self.time = time.time()
+        self.results = Results(atom_order=(), total_duration=self.target_times[-1])
+        self.statistics = Statistics(
+            evaluation_times=[t / self.target_times[-1] for t in self.target_times],
+            data=[],
+            timestep_count=self.nsteps,
+        )
+        self._current_H: None | RydbergLindbladian | RydbergHamiltonian = None
+
+        if self._config.initial_state is not None and (
+            self._config.initial_state.n_qudits != self.nqubits
+        ):
+            raise ValueError(
+                "Mismatch in number of atoms: initial state has "
+                f"{self._config.initial_state.n_qudits} and the sequence has {self.nqubits}"
+            )
+
+    def step(self, step_idx: int) -> None:
+        """One step of the evolution"""
+        dt = self._compute_dt(step_idx)
+        self._evolve_step(dt, step_idx)
+        self._apply_observables(step_idx)
+        self._save_statistics(step_idx)
+
+    def _compute_dt(self, step_idx: int) -> float:
+        return self.target_times[step_idx + 1] - self.target_times[step_idx]
+
+    @abstractmethod
+    def _evolve_step(self, dt: float, step_idx: int) -> None:
+        """One step evolution"""
+
+    def _apply_observables(self, step_idx: int) -> None:
+        norm_time = self.target_times[step_idx + 1] / self.target_times[-1]
+        for callback in self._config.observables:
+            callback(
+                self._config,
+                norm_time,
+                self.state,
+                self._current_H,  # type: ignore[arg-type]
+                self.results,
+            )
+
+    def _save_statistics(self, step_idx: int) -> None:
+        norm_time = self.target_times[step_idx + 1] / self.target_times[-1]
+        self.statistics.data.append(time.time() - self.time)
+        self.statistics(
+            self._config,
+            norm_time,
+            self.state,
+            self._current_H,  # type: ignore[arg-type]
+            self.results,
+        )
+        self.time = time.time()
+        self._current_H = None
+
+    def _run(self) -> Results:
+        for step in range(self.nsteps):
+            self.step(step)
+
+        return self.results
+
+
+class SVBackendImpl(BaseSVBackendImpl):
+
+    def __init__(self, config: SVConfig, pulser_data: PulserData):
+        """
+        For running sequences without noise. The state will evolve accoring
+        to e^(-iH t)
+
+        Args:
+            config: The configuration for the emulator.
+            pulser_data: The data for the sequence to be emulated.
+        """
+        super().__init__(config, pulser_data)
+
+        self.state: StateVector = (
+            StateVector.make(self.nqubits, gpu=self._config.gpu)
+            if self._config.initial_state is None
+            else StateVector(
+                self._config.initial_state.vector.clone(),
+                gpu=self._config.gpu,
+            )
+        )
+
+        self.stepper = EvolveStateVector.apply
+
+    def _evolve_step(self, dt: float, step_idx: int) -> None:
+        self.state.vector, self._current_H = self.stepper(
+            dt * _TIME_CONVERSION_COEFF,
+            self.omega[step_idx],
+            self.delta[step_idx],
+            self.phi[step_idx],
+            self._pulser_data.full_interaction_matrix,
+            self.state.vector,
+            self._config.krylov_tolerance,
+        )
+
+
+class NoisySVBackendImpl(BaseSVBackendImpl):
+
+    def __init__(self, config: SVConfig, pulser_data: PulserData):
+        """
+        Initializes the NoisySVBackendImpl, master equation version.
+        This class handles the Lindblad operators and
+        solves the Lindblad master equation
+
+        Args:
+            config: The configuration for the emulator.
+            pulser_data: The data for the sequence to be emulated.
+        """
+
+        super().__init__(config, pulser_data)
+
+        self.pulser_lindblads = pulser_data.lindblad_ops
+
+        self.state: DensityMatrix = (
+            DensityMatrix.make(self.nqubits, gpu=self._config.gpu)
+            if self._config.initial_state is None
+            else DensityMatrix(
+                self._config.initial_state.matrix.clone(), gpu=self._config.gpu
+            )
+        )
+
+    def _evolve_step(self, dt: float, step_idx: int) -> None:
+        self.state.matrix, self._current_H = EvolveDensityMatrix.evolve(
+            dt * _TIME_CONVERSION_COEFF,
+            self.omega[step_idx],
+            self.delta[step_idx],
+            self.phi[step_idx],
+            self._pulser_data.full_interaction_matrix,
+            self.state.matrix,
+            self._config.krylov_tolerance,
+            self.pulser_lindblads,
+        )
+
+
+def create_impl(sequence: Sequence, config: SVConfig) -> BaseSVBackendImpl:
+    """
+    Creates the backend implementation for the given sequence and config.
+
+    Args:
+        sequence: The sequence to be emulated.
+        config: configuration for the emulator.
+
+    Returns:
+        An instance of SVBackendImpl.
+    """
+    pulse_data = PulserData(sequence=sequence, config=config, dt=config.dt)
+    if pulse_data.has_lindblad_noise:
+        return NoisySVBackendImpl(config, pulse_data)
+    else:
+        return SVBackendImpl(config, pulse_data)
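The new module is a textbook template-method/factory split: BaseSVBackendImpl owns the stepping loop, observables and statistics, the two subclasses only override _evolve_step, and create_impl picks one based on whether the sequence carries Lindblad noise. A stripped-down sketch of the same pattern with hypothetical names (not the package classes):

    from abc import ABC, abstractmethod

    class BaseImpl(ABC):
        def __init__(self, nsteps: int):
            self.nsteps = nsteps

        def run(self) -> list[str]:
            # The base class owns the loop; subclasses only define one step.
            return [self.evolve_step(step) for step in range(self.nsteps)]

        @abstractmethod
        def evolve_step(self, step: int) -> str: ...

    class PureImpl(BaseImpl):
        def evolve_step(self, step: int) -> str:
            return f"step {step}: state vector, exp(-i dt H)"

    class NoisyImpl(BaseImpl):
        def evolve_step(self, step: int) -> str:
            return f"step {step}: density matrix, Lindblad master equation"

    def create_impl(has_lindblad_noise: bool, nsteps: int) -> BaseImpl:
        return NoisyImpl(nsteps) if has_lindblad_noise else PureImpl(nsteps)

    print(create_impl(False, 2).run())
    print(create_impl(True, 2).run())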
emu_sv/sv_config.py
CHANGED
@@ -7,9 +7,13 @@ from typing import Any, ClassVar

 from emu_sv.custom_callback_implementations import (
     correlation_matrix_sv_impl,
+    correlation_matrix_sv_den_mat_impl,
     energy_second_moment_sv_impl,
+    energy_second_moment_den_mat_impl,
     energy_variance_sv_impl,
+    energy_variance_sv_den_mat_impl,
     qubit_occupation_sv_impl,
+    qubit_occupation_sv_den_mat_impl,
 )

 from pulser.backend import (
@@ -101,6 +105,10 @@ class SVConfig(EmulationConfig):
                 "Warning: The runs and samples_per_run "
                 "values of the NoiseModel are ignored!"
             )
+            if "SPAM" in self.noise_model.noise_types:
+                raise NotImplementedError(
+                    "SPAM errors are currently not supported in emu-sv."
+                )

     def _expected_kwargs(self) -> set[str]:
         return super()._expected_kwargs() | {
@@ -115,23 +123,45 @@ class SVConfig(EmulationConfig):

     def monkeypatch_observables(self) -> None:
         obs_list = []
+
         for _, obs in enumerate(self.observables):  # monkey patch
             obs_copy = copy.deepcopy(obs)
+
             if isinstance(obs, Occupation):
                 obs_copy.apply = MethodType(  # type: ignore[method-assign]
-
+                    (
+                        qubit_occupation_sv_impl
+                        if self.noise_model.noise_types == ()
+                        else qubit_occupation_sv_den_mat_impl
+                    ),
+                    obs_copy,
                 )
-
+            if isinstance(obs, CorrelationMatrix):
                 obs_copy.apply = MethodType(  # type: ignore[method-assign]
-
+                    (
+                        correlation_matrix_sv_impl
+                        if self.noise_model.noise_types == ()
+                        else correlation_matrix_sv_den_mat_impl
+                    ),
+                    obs_copy,
                 )
-
+            if isinstance(obs, EnergyVariance):
                 obs_copy.apply = MethodType(  # type: ignore[method-assign]
-
+                    (
+                        energy_variance_sv_impl
+                        if self.noise_model.noise_types == ()
+                        else energy_variance_sv_den_mat_impl
+                    ),
+                    obs_copy,
                 )
-            elif isinstance(obs,
+            elif isinstance(obs, EnergySecondMoment):
                 obs_copy.apply = MethodType(  # type: ignore[method-assign]
-
+                    (
+                        energy_second_moment_sv_impl
+                        if self.noise_model.noise_types == ()
+                        else energy_second_moment_den_mat_impl
+                    ),
+                    obs_copy,
                 )
             obs_list.append(obs_copy)
         self.observables = tuple(obs_list)
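monkeypatch_observables rebinds each observable's apply method to the state-vector or the density-matrix implementation, depending on whether the noise model declares any noise types. A minimal illustration of that per-instance rebinding with MethodType, using hypothetical stand-in classes rather than the pulser observables:

    from types import MethodType

    class Occupation:
        def apply(self) -> str:
            return "default"

    def occupation_sv_impl(self: "Occupation") -> str:
        return "state-vector implementation"

    def occupation_den_mat_impl(self: "Occupation") -> str:
        return "density-matrix implementation"

    def patch(obs: Occupation, noise_types: tuple) -> Occupation:
        impl = occupation_sv_impl if noise_types == () else occupation_den_mat_impl
        obs.apply = MethodType(impl, obs)  # rebind on this instance only
        return obs

    print(patch(Occupation(), ()).apply())              # state-vector implementation
    print(patch(Occupation(), ("eff_noise",)).apply())  # density-matrix implementation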
emu_sv/time_evolution.py
CHANGED
@@ -3,6 +3,7 @@ from typing import Any, no_type_check
 from emu_base.math.krylov_exp import krylov_exp
 from emu_base.math.double_krylov import double_krylov
 from emu_sv.hamiltonian import RydbergHamiltonian
+from emu_sv.lindblad_operator import RydbergLindbladian


 def _apply_omega_real(
@@ -87,27 +88,68 @@ class DHDPhiSparse:
 class DHDDeltaSparse:
     """
     Derivative of the Rydberg Hamiltonian respect to Delta:
-    ∂H
+    ∂H/∂Δᵢ = -nᵢ
     """

-    def __init__(self,
-        self.
-        self.shape = (2**
-
-
-
-
-
-
-
+    def __init__(self, i: int, nqubits: int):
+        self.nqubits = nqubits
+        self.shape = (2**i, 2, 2 ** (nqubits - i - 1))
+
+    def __matmul__(self, vec: torch.Tensor) -> torch.Tensor:
+        result = vec.clone()
+        result = result.view(vec.shape[0], *self.shape)
+        result[:, :, 0] = 0.0
+        return -result.view(vec.shape[0], 2**self.nqubits)
+
+
+class DHDUSparse:
+    """
+    Derivative of the Rydberg Hamiltonian respect to the interaction matrix:
+    ∂H/∂Uᵢⱼ = nᵢnⱼ
+    """
+
+    def __init__(self, i: int, j: int, nqubits: int):
+        self.shape = (2**i, 2, 2 ** (j - i - 1), 2, 2 ** (nqubits - j - 1))
+        self.nqubits = nqubits

     def __matmul__(self, vec: torch.Tensor) -> torch.Tensor:
-
+        result = vec.clone()
+        result = result.view(vec.shape[0], *self.shape)
+        result[:, :, 0] = 0.0
+        result[:, :, 1, :, 0] = 0.0
+        return result.view(vec.shape[0], 2**self.nqubits)


 class EvolveStateVector(torch.autograd.Function):
     """Custom autograd implementation of a step in the time evolution."""

+    @staticmethod
+    def evolve(
+        dt: float,
+        omegas: torch.Tensor,
+        deltas: torch.Tensor,
+        phis: torch.Tensor,
+        interaction_matrix: torch.Tensor,
+        state: torch.Tensor,
+        krylov_tolerance: float,
+    ) -> tuple[torch.Tensor, RydbergHamiltonian]:
+        ham = RydbergHamiltonian(
+            omegas=omegas,
+            deltas=deltas,
+            phis=phis,
+            interaction_matrix=interaction_matrix,
+            device=state.device,
+        )
+        op = lambda x: -1j * dt * (ham * x)
+        res = krylov_exp(
+            op,
+            state,
+            norm_tolerance=krylov_tolerance,
+            exp_tolerance=krylov_tolerance,
+            is_hermitian=True,
+        )
+        return res, ham
+
     @staticmethod
     def forward(
         ctx: Any,
@@ -136,20 +178,8 @@ class EvolveStateVector(torch.autograd.Function):
             state (Tensor): input state to be evolved
             krylov_tolerance (float):
         """
-        ham =
-            omegas
-            deltas=deltas,
-            phis=phis,
-            interaction_matrix=interaction_matrix,
-            device=state.device,
-        )
-        op = lambda x: -1j * dt * (ham * x)
-        res = krylov_exp(
-            op,
-            state,
-            norm_tolerance=krylov_tolerance,
-            exp_tolerance=krylov_tolerance,
-            is_hermitian=True,
+        res, ham = EvolveStateVector.evolve(
+            dt, omegas, deltas, phis, interaction_matrix, state, krylov_tolerance
         )
         ctx.save_for_backward(omegas, deltas, phis, interaction_matrix, state)
         ctx.dt = dt
@@ -217,9 +247,10 @@ class EvolveStateVector(torch.autograd.Function):

         - The action of the derivatives of the Hamiltonian with
         respect to the input parameters are implemented separately in
-        - ∂H/∂Ω:
-        - ∂H/∂Δ:
-        - ∂H/∂φ:
+        - ∂H/∂Ω: `DHDOmegaSparse`
+        - ∂H/∂Δ: `DHDDeltaSparse`
+        - ∂H/∂φ: `DHDPhiSparse`
+        - ∂H/∂Uᵢⱼ `DHDUSparse`

         Then, the resulting gradient respect to a generic parameter reads:
         gΩ = Tr( -i dt ∂H/∂Ω @ Vs @ dS @ Vg* )
@@ -229,7 +260,9 @@
         tolerance = ctx.tolerance
         nqubits = len(omegas)

-        grad_omegas, grad_deltas, grad_phis
+        grad_omegas, grad_deltas, grad_phis = None, None, None
+        grad_int_mat = None
+        grad_state_in = None

         ham = RydbergHamiltonian(
             omegas=omegas,
@@ -239,7 +272,7 @@
             device=state.device,
         )

-        if any(ctx.needs_input_grad[1:
+        if any(ctx.needs_input_grad[1:5]):
             op = lambda x: -1j * dt * (ham * x)
             lanczos_vectors_state, dS, lanczos_vectors_grad = double_krylov(
                 op, state, grad_state_out, tolerance
@@ -263,9 +296,7 @@
         if ctx.needs_input_grad[2]:
             grad_deltas = torch.zeros_like(deltas)
             for i in range(nqubits):
-
-                dhd = DHDDeltaSparse(i, e_l.device, nqubits)
-                # compute the trace
+                dhd = DHDDeltaSparse(i, nqubits)
                 v = dhd @ e_l
                 grad_deltas[i] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real

@@ -276,8 +307,60 @@
                 v = dhp @ e_l
                 grad_phis[i] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real

+        if ctx.needs_input_grad[4]:
+            grad_int_mat = torch.zeros_like(interaction_matrix)
+            for i in range(nqubits):
+                for j in range(i + 1, nqubits):
+                    dhu = DHDUSparse(i, j, nqubits)
+                    v = dhu @ e_l
+                    grad_int_mat[i, j] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real
+
         if ctx.needs_input_grad[5]:
             op = lambda x: (1j * dt) * (ham * x)
-            grad_state_in = krylov_exp(op, grad_state_out, tolerance, tolerance)
+            grad_state_in = krylov_exp(op, grad_state_out.detach(), tolerance, tolerance)
+
+        return (
+            None,
+            grad_omegas,
+            grad_deltas,
+            grad_phis,
+            grad_int_mat,
+            grad_state_in,
+            None,
+        )
+
+
+class EvolveDensityMatrix:
+    """Evolution of a density matrix under a Lindbladian operator."""

-
+    @staticmethod
+    def evolve(
+        dt: float,
+        omegas: torch.Tensor,
+        deltas: torch.Tensor,
+        phis: torch.Tensor,
+        full_interaction_matrix: torch.Tensor,
+        density_matrix: torch.Tensor,
+        krylov_tolerance: float,
+        pulser_lindblads: list[torch.Tensor],
+    ) -> tuple[torch.Tensor, RydbergLindbladian]:
+        ham = RydbergLindbladian(
+            omegas=omegas,
+            deltas=deltas,
+            phis=phis,
+            pulser_linblads=pulser_lindblads,
+            interaction_matrix=full_interaction_matrix,
+            device=density_matrix.device,
+        )
+
+        op = lambda x: -1j * dt * (ham @ x)
+        return (
+            krylov_exp(
+                op,
+                density_matrix,
+                norm_tolerance=krylov_tolerance,
+                exp_tolerance=krylov_tolerance,
+                is_hermitian=False,
+            ),
+            ham,
+        )
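EvolveStateVector.evolve applies exp(-i dt H) to the state with a Krylov expansion; on a small system the same step can be cross-checked against a dense matrix exponential. A sketch of such a check for two qubits, with a Rydberg-style Hamiltonian (Ω/2)Σᵢσₓⁱ − δΣᵢnᵢ + U n₁n₂ assembled by hand rather than via RydbergHamiltonian:

    import torch

    dtype = torch.complex128
    dt = 0.01
    sx = torch.tensor([[0, 1], [1, 0]], dtype=dtype)
    n = torch.tensor([[0, 0], [0, 1]], dtype=dtype)
    eye = torch.eye(2, dtype=dtype)

    omega, delta, interaction = 2.0, 1.0, 5.0
    H = (
        0.5 * omega * (torch.kron(sx, eye) + torch.kron(eye, sx))
        - delta * (torch.kron(n, eye) + torch.kron(eye, n))
        + interaction * torch.kron(n, n)
    )

    state = torch.zeros(4, dtype=dtype)
    state[0] = 1.0  # |00>

    # Exact one-step propagation; a Krylov method should reproduce this
    # up to its tolerance.
    evolved = torch.matrix_exp(-1j * dt * H) @ state
    print(torch.linalg.vector_norm(evolved))  # stays 1 (unitary evolution)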
{emu_sv-2.1.1.dist-info → emu_sv-2.2.1.dist-info}/METADATA
CHANGED

@@ -1,11 +1,11 @@
 Metadata-Version: 2.4
 Name: emu-sv
-Version: 2.1.1
+Version: 2.2.1
 Summary: Pasqal State Vector based pulse emulator built on PyTorch
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
 Project-URL: Issues, https://github.com/pasqal-io/emulators/issues
-Author-email:
+Author-email: Kemal Bidzhiev <kemal.bidzhiev@pasqal.com>, Stefano Grava <stefano.grava@pasqal.com>, Pablo Le Henaff <pablo.le-henaff@pasqal.com>, Mauro Mendizabal <mauro.mendizabal-pico@pasqal.com>, Elie Merhej <elie.merhej@pasqal.com>, Anton Quelle <anton.quelle@pasqal.com>
 License: PASQAL OPEN-SOURCE SOFTWARE LICENSE AGREEMENT (MIT-derived)

 The author of the License is:
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==2.1.1
+Requires-Dist: emu-base==2.2.1
 Description-Content-Type: text/markdown

 <div align="center">
emu_sv-2.2.1.dist-info/RECORD
ADDED

@@ -0,0 +1,15 @@
+emu_sv/__init__.py,sha256=sREepAk-h3a3-e7PGexVNmYmj5hf4fVIrKk78vnDKiE,771
+emu_sv/custom_callback_implementations.py,sha256=_7XLIDzJ-p3DVqz-Jyv0eYbl8nih2x2p-pM4cBCLumA,6367
+emu_sv/dense_operator.py,sha256=NfgzVpnNitc5ZSM4RlfpAc5Ls2wFPNsTxdeFdhJSg1o,6909
+emu_sv/density_matrix_state.py,sha256=5W_UKIAYHb0k3ryRLQ2dbFUgrb5ju5jceDGAekM2gNE,7035
+emu_sv/hamiltonian.py,sha256=CqNGuWJlO2ZljK47wt130s-5uKiOldQUsC3tjwk1mKA,6106
+emu_sv/lindblad_operator.py,sha256=pgjRNLBcvEM2-qxM8uy9wL74OtrD4A8trQeERi_AXH8,8892
+emu_sv/state_vector.py,sha256=zKHCdgl_eRIOPE4qVKO53ig9UyYTQ7a_guNFXgynU7g,9753
+emu_sv/sv_backend.py,sha256=-soOkSEzEBK1dCKnYnbtvYjmNZtZra1_4jP3H1ROOtM,737
+emu_sv/sv_backend_impl.py,sha256=mdPWBLDwH0q7EEwQTmLNLLx5tycMmsCQbUifIHvciMk,8059
+emu_sv/sv_config.py,sha256=0geciKkrF3h8pU_UAQ8R-G6WxfYb_X5XIrxZavGxK5Q,6511
+emu_sv/time_evolution.py,sha256=_VH4f2RF6lGKzO08WxTYJ5Aw8_pTTMRKcyMnIuxH03I,13382
+emu_sv/utils.py,sha256=-axfQ2tqw0C7I9yw-28g7lytyk373DNBjDALh4kLBrM,302
+emu_sv-2.2.1.dist-info/METADATA,sha256=_NvCSEGYdw9ZXATsZKME-GX0ECeiJcDy8MthY9C5ZUU,3595
+emu_sv-2.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_sv-2.2.1.dist-info/RECORD,,
emu_sv-2.1.1.dist-info/RECORD
DELETED
@@ -1,14 +0,0 @@
-emu_sv/__init__.py,sha256=907RfJcGq7Nq6ayS0bsE5q7vrMTTEf41FBIXt651lvY,702
-emu_sv/custom_callback_implementations.py,sha256=zvsSiDIc56gwybKq87VFZyKsniTDye6-oFd2-R0shpg,3447
-emu_sv/dense_operator.py,sha256=NfgzVpnNitc5ZSM4RlfpAc5Ls2wFPNsTxdeFdhJSg1o,6909
-emu_sv/density_matrix_state.py,sha256=6UBLUXaJaUdzOhflrKolcnH8737JszX7sry1WmbyakI,6993
-emu_sv/hamiltonian.py,sha256=CqNGuWJlO2ZljK47wt130s-5uKiOldQUsC3tjwk1mKA,6106
-emu_sv/lindblad_operator.py,sha256=KmaNCahpAi8SIXh-TrFD-ggmGpa1zklp8DMWVK9Y_J4,7433
-emu_sv/state_vector.py,sha256=lqSbv4BMtDtgY0YUPuhIUNJxrlVa7vUWuN_XqwpG5sQ,9823
-emu_sv/sv_backend.py,sha256=FrSBG8pacgucZ4MHKApfPh-VEFApsjnBzVb03GCcTpc,4493
-emu_sv/sv_config.py,sha256=q-cnyWwr_nNbpXI_m5vG51Wz_tyV5TL5M28uP2WctP4,5412
-emu_sv/time_evolution.py,sha256=pTmWzgI4AboRYklvCz4OLQNNN_RB1bOqJBXdsrFf6jk,10867
-emu_sv/utils.py,sha256=-axfQ2tqw0C7I9yw-28g7lytyk373DNBjDALh4kLBrM,302
-emu_sv-2.1.1.dist-info/METADATA,sha256=8QGaiNu0nRthfw2O717nMMoO60LCLnVaVrAPjp_t7dk,3513
-emu_sv-2.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-emu_sv-2.1.1.dist-info/RECORD,,
{emu_sv-2.1.1.dist-info → emu_sv-2.2.1.dist-info}/WHEEL
File without changes