emu_mps-1.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_mps/__init__.py +38 -0
- emu_mps/algebra.py +151 -0
- emu_mps/hamiltonian.py +449 -0
- emu_mps/mpo.py +243 -0
- emu_mps/mps.py +528 -0
- emu_mps/mps_backend.py +35 -0
- emu_mps/mps_backend_impl.py +525 -0
- emu_mps/mps_config.py +64 -0
- emu_mps/noise.py +29 -0
- emu_mps/tdvp.py +209 -0
- emu_mps/utils.py +258 -0
- emu_mps-1.2.1.dist-info/METADATA +133 -0
- emu_mps-1.2.1.dist-info/RECORD +14 -0
- emu_mps-1.2.1.dist-info/WHEEL +4 -0
emu_mps/mps_backend_impl.py
ADDED

@@ -0,0 +1,525 @@
import math
import random
from resource import RUSAGE_SELF, getrusage
from typing import Optional

import torch
import time
from pulser import Sequence

from emu_base import Results, State, PulserData
from emu_base.math.brents_root_finding import BrentsRootFinder
from emu_mps.hamiltonian import make_H, update_H
from emu_mps.mpo import MPO
from emu_mps.mps import MPS
from emu_mps.mps_config import MPSConfig
from emu_mps.noise import compute_noise_from_lindbladians, pick_well_prepared_qubits
from emu_mps.tdvp import (
    evolve_single,
    evolve_pair,
    EvolveConfig,
    new_right_bath,
    right_baths,
)
from emu_mps.utils import (
    extended_mpo_factors,
    extended_mps_factors,
    get_extended_site_index,
    new_left_bath,
)
from enum import Enum, auto


class SwipeDirection(Enum):
    LEFT_TO_RIGHT = auto()
    RIGHT_TO_LEFT = auto()


class MPSBackendImpl:
    current_time: float = (
        0.0  # While dt is an integer, noisy collapse can happen at non-integer times.
    )
    well_prepared_qubits_filter: Optional[list[bool]]
    hamiltonian: MPO
    state: MPS
    right_baths: list[torch.Tensor]
    tdvp_index: int
    swipe_direction: SwipeDirection
    timestep_index: int
    target_time: float
    results: Results

    def __init__(self, mps_config: MPSConfig, pulser_data: PulserData):
        self.config = mps_config
        self.target_time = float(self.config.dt)
        self.qubit_count = pulser_data.qubit_count
        assert self.qubit_count >= 2
        self.omega = pulser_data.omega
        self.delta = pulser_data.delta
        self.phi = pulser_data.phi
        self.timestep_count: int = self.omega.shape[0]
        self.has_lindblad_noise = pulser_data.has_lindblad_noise
        self.lindblad_noise = torch.zeros(2, 2, dtype=torch.complex128)
        self.full_interaction_matrix = pulser_data.full_interaction_matrix
        self.masked_interaction_matrix = pulser_data.masked_interaction_matrix
        self.hamiltonian_type = pulser_data.hamiltonian_type
        self.slm_end_time = pulser_data.slm_end_time
        self.is_masked = self.slm_end_time > 0.0
        self.left_baths: list[torch.Tensor]
        self.time = time.time()
        self.swipe_direction = SwipeDirection.LEFT_TO_RIGHT
        self.tdvp_index = 0
        self.timestep_index = 0
        self.results = Results()

        self.evolve_config = EvolveConfig(
            exp_tolerance=self.config.precision * self.config.extra_krylov_tolerance,
            norm_tolerance=self.config.precision * self.config.extra_krylov_tolerance,
            max_krylov_dim=self.config.max_krylov_dim,
            is_hermitian=not self.has_lindblad_noise,
            max_error=self.config.precision,
            max_rank=self.config.max_bond_dim,
        )

    def init_dark_qubits(self) -> None:
        has_state_preparation_error: bool = (
            self.config.noise_model is not None
            and self.config.noise_model.state_prep_error > 0.0
        )

        self.well_prepared_qubits_filter = (
            pick_well_prepared_qubits(
                self.config.noise_model.state_prep_error, self.qubit_count
            )
            if has_state_preparation_error
            else None
        )

        if self.well_prepared_qubits_filter is not None:
            self.qubit_count = sum(1 for x in self.well_prepared_qubits_filter if x)

            self.full_interaction_matrix = self.full_interaction_matrix[
                self.well_prepared_qubits_filter, :
            ][:, self.well_prepared_qubits_filter]
            self.masked_interaction_matrix = self.masked_interaction_matrix[
                self.well_prepared_qubits_filter, :
            ][:, self.well_prepared_qubits_filter]
            self.omega = self.omega[:, self.well_prepared_qubits_filter]
            self.delta = self.delta[:, self.well_prepared_qubits_filter]
            self.phi = self.phi[:, self.well_prepared_qubits_filter]

    def init_initial_state(self, initial_state: State | None = None) -> None:
        if initial_state is None:
            self.state = MPS.make(
                self.qubit_count,
                precision=self.config.precision,
                max_bond_dim=self.config.max_bond_dim,
                num_gpus_to_use=self.config.num_gpus_to_use,
            )
            return

        if self.well_prepared_qubits_filter is not None:
            raise NotImplementedError(
                "Specifying the initial state in the presence "
                "of state preparation errors is currently not implemented."
            )

        assert isinstance(initial_state, MPS)
        initial_state = MPS(
            # Deep copy of every tensor of the initial state.
            [f.clone().detach() for f in initial_state.factors],
            precision=self.config.precision,
            max_bond_dim=self.config.max_bond_dim,
            num_gpus_to_use=self.config.num_gpus_to_use,
        )
        initial_state.truncate()
        initial_state *= 1 / initial_state.norm()
        self.state = initial_state

    def init_hamiltonian(self) -> None:
        """
        Must be called AFTER init_dark_qubits otherwise,
        too many factors are put in the Hamiltonian
        """
        self.hamiltonian = make_H(
            interaction_matrix=self.masked_interaction_matrix
            if self.is_masked
            else self.full_interaction_matrix,
            hamiltonian_type=self.hamiltonian_type,
            num_gpus_to_use=self.config.num_gpus_to_use,
        )

        update_H(
            hamiltonian=self.hamiltonian,
            omega=self.omega[self.timestep_index, :],
            delta=self.delta[self.timestep_index, :],
            phi=self.phi[self.timestep_index, :],
            noise=self.lindblad_noise,
        )

    def init_baths(self) -> None:
        self.left_baths = [
            torch.ones(
                1, 1, 1, dtype=torch.complex128, device=self.state.factors[0].device
            )
        ]
        self.right_baths = right_baths(self.state, self.hamiltonian, final_qubit=2)
        assert len(self.right_baths) == self.qubit_count - 1

    def init(self) -> None:
        self.init_dark_qubits()
        self.init_initial_state(self.config.initial_state)
        self.init_hamiltonian()
        self.init_baths()

    def is_finished(self) -> bool:
        return self.timestep_index >= self.timestep_count

    def _evolve(
        self, *indices: int, dt: float, orth_center_right: Optional[bool] = None
    ) -> None:
        """
        Time-evolve the state's tensors located at the given 1 or 2 indices by dt,
        using the baths stored in self.left_baths and self.right_baths.
        When 2 indices are given, they need to be consecutive.
        Updates the state's orthogonality center according to orth_center_right.
        """
        assert 1 <= len(indices) <= 2

        baths = (self.left_baths[-1], self.right_baths[-1])

        if len(indices) == 1:
            assert orth_center_right is None
            (index,) = indices
            assert self.state.orthogonality_center == index

            self.state.factors[index] = evolve_single(
                state_factor=self.state.factors[index],
                ham_factor=self.hamiltonian.factors[index],
                baths=baths,
                dt=dt,
                config=self.evolve_config,
            )
        else:
            assert orth_center_right is not None
            l, r = indices
            assert r == l + 1, "Indices need to be consecutive"
            assert self.state.orthogonality_center in {l, r}, (
                "State needs to be orthogonalized" " on one of the evolved indices"
            )

            self.state.factors[l : r + 1] = evolve_pair(
                state_factors=self.state.factors[l : r + 1],
                ham_factors=self.hamiltonian.factors[l : r + 1],
                baths=baths,
                dt=dt,
                config=self.evolve_config,
                orth_center_right=orth_center_right,
            )

            self.state.orthogonality_center = r if orth_center_right else l

    def progress(self) -> None:
        """
        Do one unit of simulation work given the current state.
        Update the state accordingly.
        The state of the simulation is stored in self.tdvp_index and self.swipe_direction.
        """
        if self.is_finished():
            return

        delta_time = self.target_time - self.current_time

        assert self.qubit_count >= 1
        if 1 <= self.qubit_count <= 2:
            # Corner case: only 1 or 2 qubits
            assert self.swipe_direction == SwipeDirection.LEFT_TO_RIGHT
            assert self.tdvp_index == 0

            if self.qubit_count == 1:
                self._evolve(0, dt=delta_time)
            else:
                self._evolve(0, 1, dt=delta_time, orth_center_right=False)

            self.tdvp_complete()

        elif (
            self.tdvp_index < self.qubit_count - 2
            and self.swipe_direction == SwipeDirection.LEFT_TO_RIGHT
        ):
            # Left-to-right swipe of TDVP
            self._evolve(
                self.tdvp_index,
                self.tdvp_index + 1,
                dt=delta_time / 2,
                orth_center_right=True,
            )
            self.left_baths.append(
                new_left_bath(
                    self.left_baths[-1],
                    self.state.factors[self.tdvp_index],
                    self.hamiltonian.factors[self.tdvp_index],
                )
            )
            self._evolve(self.tdvp_index + 1, dt=-delta_time / 2)
            self.right_baths.pop()
            self.tdvp_index += 1

        elif (
            self.tdvp_index == self.qubit_count - 2
            and self.swipe_direction == SwipeDirection.LEFT_TO_RIGHT
        ):
            # Time-evolution of the rightmost 2 tensors
            self._evolve(
                self.tdvp_index,
                self.tdvp_index + 1,
                dt=delta_time,
                orth_center_right=False,
            )
            self.swipe_direction = SwipeDirection.RIGHT_TO_LEFT

        elif (
            1 <= self.tdvp_index and self.swipe_direction == SwipeDirection.RIGHT_TO_LEFT
        ):
            # Right-to-left swipe of TDVP
            assert self.tdvp_index <= self.qubit_count - 2
            self.right_baths.append(
                new_right_bath(
                    self.right_baths[-1],
                    self.state.factors[self.tdvp_index + 1],
                    self.hamiltonian.factors[self.tdvp_index + 1],
                )
            )
            if not self.has_lindblad_noise:
                # Free memory because it won't be used anymore
                self.right_baths[-2] = None

            self._evolve(self.tdvp_index, dt=-delta_time / 2)
            self.left_baths.pop()

            self._evolve(
                self.tdvp_index - 1,
                self.tdvp_index,
                dt=delta_time / 2,
                orth_center_right=False,
            )
            self.tdvp_index -= 1

            if self.tdvp_index == 0:
                self.tdvp_complete()
                self.swipe_direction = SwipeDirection.LEFT_TO_RIGHT

        else:
            raise Exception("Didn't expect this")

        # TODO: checkpoint/autosave here

    def tdvp_complete(self) -> None:
        self.current_time = self.target_time
        self.timestep_complete()

    def timestep_complete(self) -> None:
        self.fill_results()
        self.timestep_index += 1
        self.target_time = float((self.timestep_index + 1) * self.config.dt)
        if self.is_masked and self.current_time >= self.slm_end_time:
            self.is_masked = False
            self.hamiltonian = make_H(
                interaction_matrix=self.full_interaction_matrix,
                hamiltonian_type=self.hamiltonian_type,
                num_gpus_to_use=self.config.num_gpus_to_use,
            )

        if not self.is_finished():
            update_H(
                hamiltonian=self.hamiltonian,
                omega=self.omega[self.timestep_index, :],
                delta=self.delta[self.timestep_index, :],
                phi=self.phi[self.timestep_index, :],
                noise=self.lindblad_noise,
            )
            self.init_baths()

        self.log_step_statistics(duration=time.time() - self.time)
        self.time = time.time()

    def fill_results(self) -> None:
        normalized_state = 1 / self.state.norm() * self.state

        if self.well_prepared_qubits_filter is None:
            for callback in self.config.callbacks:
                callback(
                    self.config,
                    self.current_time,
                    normalized_state,
                    self.hamiltonian,
                    self.results,
                )
            return

        full_mpo, full_state = None, None
        for callback in self.config.callbacks:
            if self.current_time not in callback.evaluation_times:
                continue

            if full_mpo is None or full_state is None:
                # Only do this potentially expensive step once and when needed.
                full_mpo = MPO(
                    extended_mpo_factors(
                        self.hamiltonian.factors, self.well_prepared_qubits_filter
                    )
                )
                full_state = MPS(
                    extended_mps_factors(
                        normalized_state.factors, self.well_prepared_qubits_filter
                    ),
                    num_gpus_to_use=None,  # Keep the already assigned devices.
                    orthogonality_center=get_extended_site_index(
                        self.well_prepared_qubits_filter,
                        normalized_state.orthogonality_center,
                    ),
                )

            callback(self.config, self.current_time, full_state, full_mpo, self.results)

    def log_step_statistics(self, *, duration: float) -> None:
        if self.state.factors[0].is_cuda:
            max_mem_per_device = (
                torch.cuda.max_memory_allocated(device) * 1e-6
                for device in range(torch.cuda.device_count())
            )
            max_mem = max(max_mem_per_device)
        else:
            max_mem = getrusage(RUSAGE_SELF).ru_maxrss * 1e-3

        self.config.logger.info(
            f"step = {self.timestep_index}/{self.timestep_count}, "
            + f"χ = {self.state.get_max_bond_dim()}, "
            + f"|ψ| = {self.state.get_memory_footprint():.3f} MB, "
            + f"RSS = {max_mem:.3f} MB, "
            + f"Δt = {duration:.3f} s"
        )

        if self.results.statistics is None:
            assert self.timestep_index == 1
            self.results.statistics = {"steps": []}

        assert "steps" in self.results.statistics
        assert len(self.results.statistics["steps"]) == self.timestep_index - 1

        self.results.statistics["steps"].append(
            {
                "max_bond_dimension": self.state.get_max_bond_dim(),
                "memory_footprint": self.state.get_memory_footprint(),
                "RSS": max_mem,
                "duration": duration,
            }
        )


class NoisyMPSBackendImpl(MPSBackendImpl):
    """
    Version of MPSBackendImpl with non-zero lindbladian noise.
    Implements the Monte-Carlo Wave Function jump method.
    """

    jump_threshold: float
    aggregated_lindblad_ops: Optional[torch.Tensor]
    norm_gap_before_jump: float
    root_finder: Optional[BrentsRootFinder]

    def __init__(self, config: MPSConfig, pulser_data: PulserData):
        super().__init__(config, pulser_data)
        self.aggregated_lindblad_ops = None
        self.lindblad_ops = pulser_data.lindblad_ops
        self.root_finder = None

        assert self.has_lindblad_noise

    def init_lindblad_noise(self) -> None:
        stacked = torch.stack(self.lindblad_ops)
        # The below is used for batch computation of noise collapse weights.
        self.aggregated_lindblad_ops = stacked.conj().transpose(1, 2) @ stacked

        self.lindblad_noise = compute_noise_from_lindbladians(self.lindblad_ops)
        self.jump_threshold = random.random()
        self.norm_gap_before_jump = self.state.norm() ** 2 - self.jump_threshold

    def init(self) -> None:
        super().init()
        self.init_lindblad_noise()

    def tdvp_complete(self) -> None:
        previous_time = self.current_time
        self.current_time = self.target_time
        previous_norm_gap_before_jump = self.norm_gap_before_jump
        self.norm_gap_before_jump = self.state.norm() ** 2 - self.jump_threshold

        if self.root_finder is None:
            # No quantum jump location finding in progress
            if self.norm_gap_before_jump < 0:
                # Initiate quantum jump location finding
                # Jump occurs when norm_gap_before_jump ~ 0
                self.root_finder = BrentsRootFinder(
                    start=previous_time,
                    end=self.current_time,
                    f_start=previous_norm_gap_before_jump,
                    f_end=self.norm_gap_before_jump,
                    epsilon=1,
                )
                self.target_time = self.root_finder.get_next_abscissa()
            else:
                self.timestep_complete()

            return

        self.norm_gap_before_jump = self.state.norm() ** 2 - self.jump_threshold
        self.root_finder.provide_ordinate(self.current_time, self.norm_gap_before_jump)

        if self.root_finder.is_converged(tolerance=1):
            self.do_random_quantum_jump()
            self.target_time = (self.timestep_index + 1) * self.config.dt
            self.root_finder = None
        else:
            self.target_time = self.root_finder.get_next_abscissa()

    def do_random_quantum_jump(self) -> None:
        jump_operator_weights = self.state.expect_batch(self.aggregated_lindblad_ops).real
        jumped_qubit_index, jump_operator = random.choices(
            [
                (qubit, op)
                for qubit in range(self.state.num_sites)
                for op in self.lindblad_ops
            ],
            weights=jump_operator_weights.reshape(-1),
        )[0]

        self.state.apply(jumped_qubit_index, jump_operator)
        self.state.orthogonalize(0)
        self.state *= 1 / self.state.norm()

        norm_after_normalizing = self.state.norm()
        assert math.isclose(norm_after_normalizing, 1, abs_tol=1e-10)
        self.jump_threshold = random.uniform(0.0, norm_after_normalizing**2)
        self.norm_gap_before_jump = norm_after_normalizing**2 - self.jump_threshold

    def fill_results(self) -> None:
        # Remove the noise from self.hamiltonian for the callbacks.
        # Since update_H is called at the start of do_time_step this is safe.
        update_H(
            hamiltonian=self.hamiltonian,
            omega=self.omega[self.timestep_index - 1, :],  # Meh
            delta=self.delta[self.timestep_index - 1, :],
            phi=self.phi[self.timestep_index - 1, :],
        )

        super().fill_results()


def create_impl(sequence: Sequence, config: MPSConfig) -> MPSBackendImpl:
    pulser_data = PulserData(sequence=sequence, config=config, dt=config.dt)

    if pulser_data.has_lindblad_noise:
        return NoisyMPSBackendImpl(config, pulser_data)

    return MPSBackendImpl(config, pulser_data)
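For orientation, here is a minimal driver sketch inferred only from the methods shown above (create_impl, init, is_finished, progress, results). The package's real entry point is the MPSBackend wrapper in emu_mps/mps_backend.py, which is listed above but not reproduced here, so run_sketch is a hypothetical helper rather than the actual call sequence.

from pulser import Sequence

from emu_mps.mps_backend_impl import create_impl
from emu_mps.mps_config import MPSConfig


def run_sketch(sequence: Sequence, config: MPSConfig):
    # create_impl returns NoisyMPSBackendImpl when the sequence carries Lindblad noise.
    impl = create_impl(sequence, config)
    impl.init()  # dark qubits, initial state, Hamiltonian, baths
    while not impl.is_finished():
        impl.progress()  # one unit of TDVP sweep work
    return impl.results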
emu_mps/mps_config.py
ADDED

@@ -0,0 +1,64 @@
from typing import Any

from emu_base import BackendConfig, State
from emu_mps.utils import DEVICE_COUNT


class MPSConfig(BackendConfig):
    """
    The configuration of the emu-ct MPSBackend. The kwargs passed to this class
    are passed on to the base class.
    See the API for that class for a list of available options.

    Args:
        initial_state: the initial state to use in the simulation
        dt: the timestep size that the solver uses. Note that observables are
            only calculated if the evaluation_times are divisible by dt.
        precision: up to what precision the state is truncated
        max_bond_dim: the maximum bond dimension that the state is allowed to have.
        max_krylov_dim:
            the size of the krylov subspace that the Lanczos algorithm maximally builds
        extra_krylov_tolerance:
            the Lanczos algorithm uses this*precision as the convergence tolerance
        num_gpus_to_use: during the simulation, distribute the state over this many GPUs
            0=all factors to cpu. As shown in the benchmarks, using multiple GPUs might
            alleviate memory pressure per GPU, but the runtime should be similar.
        kwargs: arguments that are passed to the base class

    Examples:
        >>> num_gpus_to_use = 2 #use 2 gpus if available, otherwise 1 or cpu
        >>> dt = 1 #this will impact the runtime
        >>> precision = 1e-6 #smaller dt requires better precision, generally
        >>> MPSConfig(num_gpus_to_use=num_gpus_to_use, dt=dt, precision=precision,
        >>>     with_modulation=True) #the last arg is taken from the base class
    """

    def __init__(
        self,
        *,
        initial_state: State | None = None,
        dt: int = 10,
        precision: float = 1e-5,
        max_bond_dim: int = 1024,
        max_krylov_dim: int = 100,
        extra_krylov_tolerance: float = 1e-3,
        num_gpus_to_use: int = DEVICE_COUNT,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.initial_state = initial_state
        self.dt = dt
        self.precision = precision
        self.max_bond_dim = max_bond_dim
        self.max_krylov_dim = max_krylov_dim
        self.num_gpus_to_use = num_gpus_to_use
        self.extra_krylov_tolerance = extra_krylov_tolerance

        if self.noise_model is not None:
            if "doppler" in self.noise_model.noise_types:
                raise NotImplementedError("Unsupported noise type: doppler")
            if (
                "amplitude" in self.noise_model.noise_types
                and self.noise_model.amp_sigma != 0.0
            ):
                raise NotImplementedError("Unsupported noise type: amp_sigma")
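As a quick illustration of the options documented above, a hypothetical construction mirroring the docstring example; the values are illustrative, not recommendations, and any keyword not listed in __init__ is forwarded to BackendConfig.

from emu_mps.mps_config import MPSConfig

config = MPSConfig(
    num_gpus_to_use=0,  # keep every MPS factor on the CPU
    dt=10,  # solver timestep (the default)
    precision=1e-6,  # truncation precision of the state
    max_bond_dim=1024,  # cap on the bond dimension (the default)
)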
emu_mps/noise.py
ADDED

@@ -0,0 +1,29 @@
import torch
import random


def compute_noise_from_lindbladians(lindbladians: list[torch.Tensor]) -> torch.Tensor:
    """
    Compute the single-qubit Hamiltonian noise term -0.5i∑L†L from all the given lindbladians.
    """

    assert all(
        lindbladian.shape == (2, 2) for lindbladian in lindbladians
    ), "Only single-qubit lindblad operators are supported"

    return (
        -1j
        / 2.0
        * sum(
            (lindbladian.T.conj() @ lindbladian for lindbladian in lindbladians),
            start=torch.zeros(2, 2, dtype=torch.complex128),
        )
    )


def pick_well_prepared_qubits(eta: float, n: int) -> list[bool]:
    """
    Randomly pick n booleans such that ℙ(False) = eta.
    """

    return [random.random() > eta for _ in range(n)]
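A small self-contained check of the two helpers above, assuming a single hypothetical decay operator L = √γ·|0⟩⟨1| (the basis ordering here is illustrative, not a statement about the package's convention):

import torch

from emu_mps.noise import compute_noise_from_lindbladians, pick_well_prepared_qubits

gamma = 0.01  # hypothetical decay rate
L = gamma**0.5 * torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128)

noise_term = compute_noise_from_lindbladians([L])
# L†L = gamma·|1⟩⟨1|, so the only non-zero entry is -0.5j·gamma.
print(noise_term)  # ≈ [[0, 0], [0, -0.005j]]

# Each qubit is well prepared with probability 1 - eta.
print(pick_well_prepared_qubits(eta=0.1, n=4))  # e.g. [True, True, False, True]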