emu-sv 2.0.4__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emu_sv/__init__.py CHANGED
@@ -36,4 +36,4 @@ __all__ = [
     "inner",
 ]
 
-__version__ = "2.0.4"
+__version__ = "2.1.1"
emu_sv/sv_backend.py CHANGED
@@ -9,7 +9,7 @@ from emu_base import PulserData
 
 from emu_sv.state_vector import StateVector
 from emu_sv.sv_config import SVConfig
-from emu_sv.time_evolution import do_time_step
+from emu_sv.time_evolution import EvolveStateVector
 
 
 _TIME_CONVERSION_COEFF = 0.001  # Omega and delta are given in rad/μs, dt in ns
@@ -54,10 +54,10 @@ class SVBackend(EmulatorBackend):
         else:
             state = StateVector.make(nqubits, gpu=self._config.gpu)
 
+        stepper = EvolveStateVector.apply
         for step in range(nsteps):
             dt = self.target_times[step + 1] - self.target_times[step]
-
-            state.vector, H = do_time_step(
+            state.vector, H = stepper(
                 dt * _TIME_CONVERSION_COEFF,
                 omega[step],
                 delta[step],
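
Why the indirection through `apply`: do_time_step was a plain function, while EvolveStateVector (defined in emu_sv/time_evolution.py below) subclasses torch.autograd.Function, and such subclasses are invoked through their `apply` classmethod, which runs forward and records the operation so the hand-written backward participates in backpropagation. A minimal sketch of the pattern; the Square class is illustrative only, not part of emu-sv:

    import torch

    class Square(torch.autograd.Function):
        """Toy custom op y = x**2 with a hand-written backward."""

        @staticmethod
        def forward(ctx, x: torch.Tensor) -> torch.Tensor:
            ctx.save_for_backward(x)  # stash what backward will need
            return x * x

        @staticmethod
        def backward(ctx, grad_out: torch.Tensor) -> torch.Tensor:
            (x,) = ctx.saved_tensors
            return 2 * x * grad_out  # chain rule: dL/dx = (dL/dy) * 2x

    x = torch.tensor(3.0, requires_grad=True)
    y = Square.apply(x)  # call via .apply, never forward() directly
    y.backward()
    print(x.grad)  # tensor(6.)

Binding `stepper = EvolveStateVector.apply` once before the loop is stylistic; the call through `apply` is what makes each timestep differentiable.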
emu_sv/sv_config.py CHANGED
@@ -37,6 +37,10 @@ class SVConfig(EmulationConfig):
         the Lanczos algorithm uses this as the convergence tolerance
     gpu: Use 1 gpu if True, and a GPU is available, otherwise, cpu.
         Will cause errors if True when a gpu is not available
+    interaction_cutoff: Set interaction coefficients below this value to `0`.
+        Potentially improves runtime and memory consumption.
+    log_level: How much to log. Set to `logging.WARN` to get rid of the timestep info.
+    log_file: If specified, log to this file rather than stdout.
     kwargs: arguments that are passed to the base class
 
 Examples:
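
For reference, a hedged construction sketch using the new options; the keyword names follow the docstring above, while the values (and the omission of the other documented arguments) are illustrative assumptions:

    import logging

    from emu_sv.sv_config import SVConfig

    config = SVConfig(
        gpu=False,
        interaction_cutoff=1e-6,   # zero out pair couplings below this value
        log_level=logging.WARN,    # silence the per-timestep info lines
        log_file="emulation.log",  # write logs here instead of stdout
    )

Rydberg interaction coefficients fall off steeply with distance (∝ 1/r⁶), so a small interaction_cutoff can zero out many distant pair couplings, which is where the runtime and memory savings come from.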
emu_sv/time_evolution.py CHANGED
@@ -1,33 +1,283 @@
 import torch
-
+from typing import Any, no_type_check
 from emu_base.math.krylov_exp import krylov_exp
+from emu_base.math.double_krylov import double_krylov
 from emu_sv.hamiltonian import RydbergHamiltonian
 
 
-def do_time_step(
-    dt: float,
-    omegas: torch.Tensor,
-    deltas: torch.Tensor,
-    phis: torch.Tensor,
-    full_interaction_matrix: torch.Tensor,
-    state_vector: torch.Tensor,
-    krylov_tolerance: float,
-) -> tuple[torch.Tensor, RydbergHamiltonian]:
-    ham = RydbergHamiltonian(
-        omegas=omegas,
-        deltas=deltas,
-        phis=phis,
-        interaction_matrix=full_interaction_matrix,
-        device=state_vector.device,
+def _apply_omega_real(
+    result: torch.Tensor,
+    i: int,
+    inds: torch.Tensor,
+    source: torch.Tensor,
+    alpha: complex,
+) -> None:
+    """Accumulate to `result` the application of ασˣᵢ on `source`"""
+    result.index_add_(i, inds, source, alpha=alpha)
+
+
+def _apply_omega_complex(
+    result: torch.Tensor,
+    i: int,
+    inds: torch.Tensor,
+    source: torch.Tensor,
+    alpha: complex,
+) -> None:
+    """Accumulate to `result` the application of ασ⁺ᵢ + α*σ⁻ᵢ on `source`"""
+    result.index_add_(i, inds[0], source.select(i, 0).unsqueeze(i), alpha=alpha)
+    result.index_add_(
+        i,
+        inds[1],
+        source.select(i, 1).unsqueeze(2),
+        alpha=alpha.conjugate(),
     )
-    op = lambda x: -1j * dt * (ham * x)
 
-    return (
-        krylov_exp(
+
+class DHDOmegaSparse:
+    """
+    Derivative of the RydbergHamiltonian with respect to Omega:
+        ∂H/∂Ωₖ = 0.5[cos(ϕₖ)σˣₖ + sin(ϕₖ)σʸₖ]
+
+    If ϕₖ=0, this simplifies to ∂H/∂Ωₖ = 0.5σˣₖ
+    """
+
+    def __init__(self, index: int, device: torch.device, nqubits: int, phi: torch.Tensor):
+        self.index = index
+        self.shape = (2**index, 2, 2 ** (nqubits - index - 1))
+        self.inds = torch.tensor([1, 0], device=device)  # flips the state, for 𝜎ₓ
+        self.alpha = 0.5 * torch.exp(1j * phi).item()
+        if phi.is_nonzero():
+            self._apply_sigmas = _apply_omega_complex
+        else:  # ∂H/∂Ωₖ = 0.5σˣₖ
+            self._apply_sigmas = _apply_omega_real
+
+    def __matmul__(self, vec: torch.Tensor) -> torch.Tensor:
+        vec = vec.view(vec.shape[0], *self.shape)  # add batch dimension
+        result = torch.zeros_like(vec)
+        self._apply_sigmas(result, 2, self.inds, vec, alpha=self.alpha)
+        return result.view(vec.shape[0], -1)
+
+
+class DHDPhiSparse:
+    """
+    Derivative of the RydbergHamiltonian with respect to Phi:
+        ∂H/∂ϕₖ = 0.5Ωₖ[cos(ϕₖ+π/2)σˣₖ + sin(ϕₖ+π/2)σʸₖ]
+    """
+
+    def __init__(
+        self,
+        index: int,
+        device: torch.device,
+        nqubits: int,
+        omega: torch.Tensor,
+        phi: torch.Tensor,
+    ):
+        self.index = index
+        self.shape = (2**index, 2, 2 ** (nqubits - index - 1))
+        self.alpha = 0.5 * (omega * torch.exp(1j * (phi + torch.pi / 2))).item()
+        self.inds = torch.tensor([1, 0], device=device)  # flips the state, for 𝜎ₓ
+
+    def __matmul__(self, vec: torch.Tensor) -> torch.Tensor:
+        vec = vec.view(vec.shape[0], *self.shape)  # add batch dimension
+        result = torch.zeros_like(vec)
+        _apply_omega_complex(result, 2, self.inds, vec, alpha=self.alpha)
+        return result.view(vec.shape[0], -1)
+
+
+class DHDDeltaSparse:
+    """
+    Derivative of the RydbergHamiltonian with respect to Delta:
+        ∂H/∂Δₖ = -nₖ
+    """
+
+    def __init__(self, index: int, device: torch.device, nqubits: int):
+        self.index = index
+        self.shape = (2**index, 2, 2 ** (nqubits - index - 1))
+        diag = torch.zeros(
+            *self.shape,
+            dtype=torch.complex128,
+            device=device,
+        )
+        diag[:, 1, :] = -1.0
+        self.diag = diag.reshape(-1)
+
+    def __matmul__(self, vec: torch.Tensor) -> torch.Tensor:
+        return vec * self.diag
+
+
+class EvolveStateVector(torch.autograd.Function):
+    """Custom autograd implementation of a step in the time evolution."""
+
+    @staticmethod
+    def forward(
+        ctx: Any,
+        dt: float,
+        omegas: torch.Tensor,
+        deltas: torch.Tensor,
+        phis: torch.Tensor,
+        interaction_matrix: torch.Tensor,
+        state: torch.Tensor,
+        krylov_tolerance: float,
+    ) -> tuple[torch.Tensor, RydbergHamiltonian]:
+        """
+        Returns the time-evolved state
+            |ψ(t+dt)〉= exp(-i dt H)|ψ(t)〉
+        under the Hamiltonian H built from the input tensor parameters omegas,
+        deltas, phis and the interaction matrix.
+
+        Args:
+            ctx (Any): context object to stash information for backward computation.
+            dt (float): timestep
+            omegas (torch.Tensor): 1D tensor of driving strengths for each qubit.
+            deltas (torch.Tensor): 1D tensor of detuning values for each qubit.
+            phis (torch.Tensor): 1D tensor of phase values for each qubit.
+            interaction_matrix (torch.Tensor): matrix representing the interaction
+                strengths between each pair of qubits.
+            state (torch.Tensor): input state to be evolved
+            krylov_tolerance (float): convergence tolerance of the Krylov exponentiation
+        """
+        ham = RydbergHamiltonian(
+            omegas=omegas,
+            deltas=deltas,
+            phis=phis,
+            interaction_matrix=interaction_matrix,
+            device=state.device,
+        )
+        op = lambda x: -1j * dt * (ham * x)
+        res = krylov_exp(
             op,
-            state_vector,
+            state,
             norm_tolerance=krylov_tolerance,
             exp_tolerance=krylov_tolerance,
-        ),
-        ham,
-    )
+            is_hermitian=True,
+        )
+        ctx.save_for_backward(omegas, deltas, phis, interaction_matrix, state)
+        ctx.dt = dt
+        ctx.tolerance = krylov_tolerance
+        return res, ham
+
+    # mypy complains and I don't know why
+    # backward takes as many incoming gradients as forward has outputs; gham is unused
+    @no_type_check
+    @staticmethod
+    def backward(ctx: Any, grad_state_out: torch.Tensor, gham: None) -> tuple[
+        None,
+        torch.Tensor | None,
+        torch.Tensor | None,
+        torch.Tensor | None,
+        torch.Tensor | None,
+        torch.Tensor | None,
+        None,
+    ]:
+        """
+        In the backward pass we receive a tensor containing the gradient of the loss L
+        with respect to the output,
+            |gψ(t+dt)〉= ∂L/∂|ψ(t+dt)〉,
+        and return the gradients of the loss with respect to the input tensor parameters
+            - gΩⱼ = ∂L/∂Ωⱼ =〈gψ(t+dt)|dU(H,∂H/∂Ωⱼ)|ψ(t)〉
+            - gΔⱼ = ∂L/∂Δⱼ = ...
+            - |gψ(t)〉= ∂L/∂|ψ(t)〉= exp(i dt H)|gψ(t+dt)〉
+
+        Args:
+            ctx (Any): context object to stash information for backward computation.
+            grad_state_out (torch.Tensor): |gψ(t+dt)〉
+
+        Returns:
+            grad_omegas (torch.Tensor): 1D tensor of gradients with respect to Ωⱼ for each qubit.
+            grad_deltas (torch.Tensor): 1D tensor of gradients with respect to Δⱼ for each qubit.
+            grad_phis (torch.Tensor): 1D tensor of gradients with respect to φⱼ for each qubit.
+            grad_state_in (torch.Tensor): gradient with respect to the input state.
+
+        Notes:
+            Gradients are obtained by matching the total variations
+               〈gψ(t+dt)|d|ψ(t+dt)〉= ∑ⱼgΔⱼ*dΔⱼ + ∑ⱼgΩⱼ*dΩⱼ + ∑ⱼgφⱼ*dφⱼ +〈gψ(t)|d|ψ(t)〉  (1)
+
+            For the exponential map U = exp(-i dt H), differentiating reads:
+                d|ψ(t+dt)〉= dU|ψ(t)〉+ Ud|ψ(t)〉
+                dU = ∑ⱼdU(H,∂H/∂Δⱼ) + ∑ⱼdU(H,∂H/∂Ωⱼ) + ∑ⱼdU(H,∂H/∂φⱼ)  (2)
+
+            where dU(H,E) is the Fréchet derivative of the exponential map
+            along the direction E:
+            - https://eprints.maths.manchester.ac.uk/1218/1/covered/MIMS_ep2008_26.pdf
+            - https://en.wikipedia.org/wiki/Derivative_of_the_exponential_map
+
+            Substituting (2) into (1) leads to the expressions of the gradients
+            with respect to the input tensors above.
+
+            Variations with respect to the Hamiltonian parameters are computed as
+                gΩ =〈gψ(t+dt)|dU(H,∂H/∂Ω)|ψ(t)〉
+                   = Tr( -i dt ∂H/∂Ω @ dU(H,|ψ(t)〉〈gψ(t+dt)|) ),
+            where, under the trace sign, ∂H/∂Ω and |ψ(t)〉〈gψ(t+dt)| can be switched.
+
+            - The Fréchet derivative is computed via an Arnoldi-Gram-Schmidt
+              decomposition in the `double_krylov` method:
+                  dU(H,|a〉〈b|) = Va @ dS @ Vb*
+              where Va, Vb are the orthogonal Krylov bases associated
+              with |a〉and |b〉respectively.
+
+            - The action of the derivatives of the Hamiltonian with
+              respect to the input parameters is implemented separately in
+              - ∂H/∂Ω: `DHDOmegaSparse`
+              - ∂H/∂Δ: `DHDDeltaSparse`
+              - ∂H/∂φ: `DHDPhiSparse`
+
+            Then, the resulting gradient with respect to a generic parameter reads:
+                gΩ = Tr( -i dt ∂H/∂Ω @ Vs @ dS @ Vg* )
+        """
+        omegas, deltas, phis, interaction_matrix, state = ctx.saved_tensors
+        dt = ctx.dt
+        tolerance = ctx.tolerance
+        nqubits = len(omegas)
+
+        grad_omegas, grad_deltas, grad_phis, grad_state_in = None, None, None, None
+
+        ham = RydbergHamiltonian(
+            omegas=omegas,
+            deltas=deltas,
+            phis=phis,
+            interaction_matrix=interaction_matrix,
+            device=state.device,
+        )
+
+        if any(ctx.needs_input_grad[1:4]):
+            op = lambda x: -1j * dt * (ham * x)
+            lanczos_vectors_state, dS, lanczos_vectors_grad = double_krylov(
+                op, state, grad_state_out, tolerance
+            )
+            # TODO: explore returning directly the basis in matrix form
+            Vs = torch.stack(lanczos_vectors_state)
+            del lanczos_vectors_state
+            Vg = torch.stack(lanczos_vectors_grad)
+            del lanczos_vectors_grad
+            e_l = dS.mT @ Vs
+
+        if ctx.needs_input_grad[1]:
+            grad_omegas = torch.zeros_like(omegas)
+            for i in range(nqubits):
+                # ∂H/∂Ωᵢ as per the docstring
+                dho = DHDOmegaSparse(i, e_l.device, nqubits, phis[i])
+                # compute the trace
+                v = dho @ e_l
+                grad_omegas[i] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real
+
+        if ctx.needs_input_grad[2]:
+            grad_deltas = torch.zeros_like(deltas)
+            for i in range(nqubits):
+                # ∂H/∂Δᵢ as per the docstring
+                dhd = DHDDeltaSparse(i, e_l.device, nqubits)
+                # compute the trace
+                v = dhd @ e_l
+                grad_deltas[i] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real
+
+        if ctx.needs_input_grad[3]:
+            grad_phis = torch.zeros_like(phis)
+            for i in range(nqubits):
+                dhp = DHDPhiSparse(i, e_l.device, nqubits, omegas[i], phis[i])
+                v = dhp @ e_l
+                grad_phis[i] = (-1j * dt * torch.tensordot(Vg.conj(), v)).real
+
+        if ctx.needs_input_grad[5]:
+            op = lambda x: (1j * dt) * (ham * x)
+            grad_state_in = krylov_exp(op, grad_state_out, tolerance, tolerance)
+
+        return None, grad_omegas, grad_deltas, grad_phis, None, grad_state_in, None
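
Taken together, the new module makes a timestep differentiable end to end. A hedged sketch of driving it by hand; the tensor shapes and dtypes are inferred from the docstrings rather than confirmed by this diff, and in normal use SVBackend calls the stepper for you:

    import torch

    from emu_sv.time_evolution import EvolveStateVector

    nqubits = 3
    omegas = torch.rand(nqubits, dtype=torch.float64, requires_grad=True)
    deltas = torch.rand(nqubits, dtype=torch.float64, requires_grad=True)
    phis = torch.zeros(nqubits, dtype=torch.float64, requires_grad=True)
    interaction = torch.rand(nqubits, nqubits, dtype=torch.float64)  # hypothetical couplings

    state = torch.zeros(2**nqubits, dtype=torch.complex128)
    state[0] = 1.0  # start in |00...0〉

    dt = 0.01  # timestep, in the μs units implied by the rad/μs drives
    new_state, ham = EvolveStateVector.apply(
        dt, omegas, deltas, phis, interaction, state, 1e-10
    )

    loss = new_state[0].abs() ** 2  # e.g. survival probability of |00...0〉
    loss.backward()  # dispatches to EvolveStateVector.backward above
    print(omegas.grad, deltas.grad, phis.grad)

Because backward consults ctx.needs_input_grad, gradients are computed only for the inputs that require them, and the adjoint state is itself propagated with a Krylov exponential, so the whole backward pass stays matrix-free.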
emu_sv-2.0.4.dist-info/METADATA → emu_sv-2.1.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-sv
-Version: 2.0.4
+Version: 2.1.1
 Summary: Pasqal State Vector based pulse emulator built on PyTorch
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==2.0.4
+Requires-Dist: emu-base==2.1.1
 Description-Content-Type: text/markdown
 
 <div align="center">
@@ -1,14 +1,14 @@
1
- emu_sv/__init__.py,sha256=Tzc6RlABZ1ZVKt1mPUes9djq0eiK6FTgPagEHuFhF6Q,702
1
+ emu_sv/__init__.py,sha256=907RfJcGq7Nq6ayS0bsE5q7vrMTTEf41FBIXt651lvY,702
2
2
  emu_sv/custom_callback_implementations.py,sha256=zvsSiDIc56gwybKq87VFZyKsniTDye6-oFd2-R0shpg,3447
3
3
  emu_sv/dense_operator.py,sha256=NfgzVpnNitc5ZSM4RlfpAc5Ls2wFPNsTxdeFdhJSg1o,6909
4
4
  emu_sv/density_matrix_state.py,sha256=6UBLUXaJaUdzOhflrKolcnH8737JszX7sry1WmbyakI,6993
5
5
  emu_sv/hamiltonian.py,sha256=CqNGuWJlO2ZljK47wt130s-5uKiOldQUsC3tjwk1mKA,6106
6
6
  emu_sv/lindblad_operator.py,sha256=KmaNCahpAi8SIXh-TrFD-ggmGpa1zklp8DMWVK9Y_J4,7433
7
7
  emu_sv/state_vector.py,sha256=lqSbv4BMtDtgY0YUPuhIUNJxrlVa7vUWuN_XqwpG5sQ,9823
8
- emu_sv/sv_backend.py,sha256=AkEtI6-SY20D0ORro3Kv8tHDRUc8gxejSiRa6d--vBE,4452
9
- emu_sv/sv_config.py,sha256=QRy0VbCugmY6TQZ48nD6RxPJbpu0wzN7-E1Sud7YxLQ,5106
10
- emu_sv/time_evolution.py,sha256=obV7DcHot0jtnEmjR1ilYiSyDcJ5rTvThRB8hFjP-2s,797
8
+ emu_sv/sv_backend.py,sha256=FrSBG8pacgucZ4MHKApfPh-VEFApsjnBzVb03GCcTpc,4493
9
+ emu_sv/sv_config.py,sha256=q-cnyWwr_nNbpXI_m5vG51Wz_tyV5TL5M28uP2WctP4,5412
10
+ emu_sv/time_evolution.py,sha256=pTmWzgI4AboRYklvCz4OLQNNN_RB1bOqJBXdsrFf6jk,10867
11
11
  emu_sv/utils.py,sha256=-axfQ2tqw0C7I9yw-28g7lytyk373DNBjDALh4kLBrM,302
12
- emu_sv-2.0.4.dist-info/METADATA,sha256=paHoVgW22OONoxvlwaypB-UF01zi0giZqUtvAz7fhmw,3513
13
- emu_sv-2.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
14
- emu_sv-2.0.4.dist-info/RECORD,,
12
+ emu_sv-2.1.1.dist-info/METADATA,sha256=8QGaiNu0nRthfw2O717nMMoO60LCLnVaVrAPjp_t7dk,3513
13
+ emu_sv-2.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
14
+ emu_sv-2.1.1.dist-info/RECORD,,
emu_sv-2.0.4.dist-info/WHEEL → emu_sv-2.1.1.dist-info/WHEEL
File without changes