emu-mps 2.6.0__py3-none-any.whl → 2.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_mps/__init__.py +1 -1
- emu_mps/mpo.py +17 -2
- emu_mps/mps.py +42 -28
- emu_mps/mps_backend.py +22 -11
- emu_mps/mps_backend_impl.py +87 -65
- emu_mps/mps_config.py +16 -16
- emu_mps/solver.py +11 -0
- {emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/METADATA +2 -2
- {emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/RECORD +10 -10
- {emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/WHEEL +0 -0
emu_mps/__init__.py
CHANGED
emu_mps/mpo.py
CHANGED
```diff
@@ -17,10 +17,25 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
     """
     Matrix Product Operator.

-    Each tensor
+    Each tensor has 4 dimensions with axes ordered as
+    (left_bond, phys_out, phys_in, right_bond). When contracting an MPO with
+    an MPS as H|ψ⟩, phys_in contracts with the MPS physical index, while
+    phys_out becomes the physical index of the resulting MPS.

     Args:
-        factors:
+        factors: List of 4D tensors with shape (Dl, d_out, d_in, Dr).
+            Neighboring tensors must satisfy Dr[i] == Dl[i+1].
+        num_gpus_to_use: Number of GPUs to use for placing MPO factors
+            (implementation-dependent placement). If None, uses all available
+            GPUs.
+
+    Returns:
+        MPO: A matrix product operator constructed from the provided factors.
+
+    Raises:
+        ValueError: If any factor is not 4D or if neighboring bond dimensions
+            do not match.
+        RuntimeError: If requested GPUs are not available.
     """

     def __init__(
```
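For orientation, a minimal sketch of what the expanded docstring describes: building an MPO from 4D factors with axes (left_bond, phys_out, phys_in, right_bond). It assumes `MPO` is importable from the package top level and that the factor list is accepted positionally, as elsewhere in this diff; it is illustrative, not taken from the package itself.

```python
import torch
from emu_mps import MPO

d = 2          # physical dimension of a qubit site
num_sites = 3

# Bond dimension 1 identity factors, shape (Dl, d_out, d_in, Dr) = (1, 2, 2, 1);
# neighboring factors satisfy Dr[i] == Dl[i + 1] as the docstring requires.
factors = [
    torch.eye(d, dtype=torch.complex128).reshape(1, d, d, 1)
    for _ in range(num_sites)
]

identity_mpo = MPO(factors)  # num_gpus_to_use can also be passed, per the docstring
```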
emu_mps/mps.py
CHANGED
```diff
@@ -23,11 +23,35 @@ class MPS(State[complex, torch.Tensor]):

 class MPS(State[complex, torch.Tensor]):
     """
-    Matrix Product State,
-
+    Matrix Product State, a.k.a. tensor train.
     Each tensor has 3 dimensions ordered as such: (left bond, site, right bond).
+    Only qubits and qutrits (using the leakage state: 'x') are supported. This
+    constructor creates an MPS directly from a list of tensors.

-
+    Args:
+        factors: the tensors for each site. WARNING: for efficiency, this list
+            of tensors IS NOT DEEP-COPIED. Therefore, the new MPS object is not
+            necessarily the exclusive owner of the list and its tensors.
+            As a consequence, beware of potential external modifications
+            affecting the list or the tensors.
+            You are responsible for deciding whether to pass your own exclusive
+            copy of the data to this constructor, or some shared objects.
+        orthogonality_center: the orthogonality center of the MPS, or None
+            (in which case it will be orthogonalized when needed)
+        precision: the threshold for truncating singular values during SVD
+            operations. Any singular value below this threshold will be
+            discarded, effectively reducing the bond dimension and improving
+            computational efficiency.
+            Check [precision in config](advanced/config.md#precision)
+        max_bond_dim: the maximum bond dimension to allow for this MPS
+        num_gpus_to_use: number of GPUs to use for placing MPS factors.
+            - If set to 0, all factors are placed on CPU.
+            - If set to None, factors retain their current device assignment.
+            - Otherwise, factors are distributed across the specified number of
+              GPUs.
+        eigenstates: the basis states for each qudit (['0','1'] or ['r','g'])
+            or qutrit ['g','r','x'], where 'x' is the leakage state
+            (default: ['0','1'])
     """

     def __init__(
@@ -41,25 +65,6 @@ class MPS(State[complex, torch.Tensor]):
         num_gpus_to_use: Optional[int] = DEVICE_COUNT,
         eigenstates: Sequence[Eigenstate] = ("r", "g"),
     ):
-        """
-        This constructor creates a MPS directly from a list of tensors. It is
-        for internal use only.
-
-        Args:
-            factors: the tensors for each site
-                WARNING: for efficiency in a lot of use cases, this list of tensors
-                IS NOT DEEP-COPIED. Therefore, the new MPS object is not necessarily
-                the exclusive owner of the list and its tensors. As a consequence,
-                beware of potential external modifications affecting the list or the tensors.
-                You are responsible for deciding whether to pass its own exclusive copy
-                of the data to this constructor, or some shared objects.
-            orthogonality_center: the orthogonality center of the MPS, or None (in which case
-                it will be orthogonalized when needed)
-            precision: the precision with which to keep this MPS
-            max_bond_dim: the maximum bond dimension to allow for this MPS
-            num_gpus_to_use: distribute the factors over this many GPUs
-                0=all factors to cpu, None=keep the existing device assignment.
-        """
         super().__init__(eigenstates=eigenstates)
         self.precision = precision
         self.max_bond_dim = max_bond_dim
@@ -555,8 +560,8 @@ class MPS(State[complex, torch.Tensor]):
     ) -> torch.Tensor:
         """
         Efficiently compute the symmetric correlation matrix
-
-        in basis ("r", "g").
+        $C_{ij} = \\langle \\text{self}|\\text{operator}_i \\text{operator}_j|\\text{self}\\rangle$
+        in basis ("r", "g"), ("0","1"), and ("r","g","x").

         Args:
             operator: a 2x2 (or 3x3) Torch tensor to use
@@ -609,13 +614,22 @@ class MPS(State[complex, torch.Tensor]):

 def inner(left: MPS, right: MPS) -> torch.Tensor:
     """
-
+    Computes the inner product ⟨left|right⟩ between two MPS states
+    (convenience wrapper for MPS.inner). Both MPS must have the same number of
+    sites and the same local (physical) dimension at each site.

     Args:
-        left: the
-        right:
+        left: Left state (conjugated in the inner product).
+        right: Right state (not conjugated).

     Returns:
-
+        A scalar torch.Tensor equal to ⟨left|right⟩ (typically complex-valued).
+        Use result.item() to convert to a Python number.
+
+    Raises:
+        ValueError: If the MPS are incompatible (e.g., different lengths or
+            dimensions).
+        RuntimeError: If tensors are on incompatible
+            devices/dtypes (as raised by PyTorch).
     """
     return left.inner(right)
```
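A short sketch of the constructor and the `inner` helper documented above: a product state built from 3D factors ordered (left bond, site, right bond), followed by an overlap computed with the module-level wrapper. The top-level import of `MPS` and the exact keyword names are assumed from the docstrings in this diff.

```python
import torch
from emu_mps import MPS
from emu_mps.mps import inner

num_qubits = 4

# |0> on each site: a (1, 2, 1) factor with amplitude 1 on the first basis state.
zero_factor = torch.zeros(1, 2, 1, dtype=torch.complex128)
zero_factor[0, 0, 0] = 1.0

psi = MPS(
    [zero_factor.clone() for _ in range(num_qubits)],  # factors are not deep-copied
    eigenstates=("0", "1"),
)

overlap = inner(psi, psi)  # ⟨psi|psi⟩ as a scalar torch.Tensor
print(overlap.item())      # ~1.0 for this normalized product state
```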
emu_mps/mps_backend.py
CHANGED
```diff
@@ -1,6 +1,6 @@
-from pulser.backend import EmulatorBackend, Results
+from pulser.backend import EmulatorBackend, Results, BitStrings
 from emu_mps.mps_config import MPSConfig
-from emu_base import init_logging
+from emu_base import init_logging, PulserData
 from emu_mps.mps_backend_impl import create_impl, MPSBackendImpl
 import pickle
 import os
@@ -12,9 +12,13 @@ class MPSBackend(EmulatorBackend):
     """
     A backend for emulating Pulser sequences using Matrix Product States (MPS),
     aka tensor trains.
+
+    Args:
+        config (MPSConfig): Configuration for the MPS backend.
+
     """

-    default_config = MPSConfig()
+    default_config = MPSConfig(observables=[BitStrings(evaluation_times=[1.0])])

     @staticmethod
     def resume(autosave_file: str | pathlib.Path) -> Results:
@@ -34,9 +38,9 @@ class MPSBackend(EmulatorBackend):

         impl.autosave_file = autosave_file
         impl.last_save_time = time.time()
-        init_logging(impl.config.log_level, impl.config.log_file)
+        logger = init_logging(impl.config.log_level, impl.config.log_file)

-
+        logger.warning(
             f"Resuming simulation from file {autosave_file}\n"
             f"Saving simulation state every {impl.config.autosave_dt} seconds"
         )
@@ -52,15 +56,22 @@ class MPSBackend(EmulatorBackend):
         """
         assert isinstance(self._config, MPSConfig)

-
-
-
-        results =
-
-
+        pulser_data = PulserData(
+            sequence=self._sequence, config=self._config, dt=self._config.dt
+        )
+        results = []
+        for sequence_data in pulser_data.get_sequences():
+            impl = create_impl(sequence_data, self._config)
+            impl.init()  # This is separate from the constructor for testing purposes.
+            result = self._run(impl)
+            results.append(
+                impl.permute_results(result, self._config.optimize_qubit_ordering)
+            )
+        return Results.aggregate(results)

     @staticmethod
     def _run(impl: MPSBackendImpl) -> Results:
+        impl.fill_results()  # at t == 0 for pulser compatibility
         while not impl.is_finished():
             impl.progress()

```
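To see the new defaults in action, here is a hedged end-to-end sketch: the backend now supplies `BitStrings(evaluation_times=[1.0])` through `default_config` rather than inside `MPSConfig` itself. The Pulser sequence below is ordinary Pulser usage for illustration only, and the `MPSBackend(seq, config=...).run()` call pattern is assumed from the Pulser backend interface rather than shown in this diff.

```python
from pulser import Pulse, Register, Sequence
from pulser.devices import MockDevice
from pulser.backend import BitStrings
from emu_mps import MPSBackend, MPSConfig

# A small, arbitrary sequence purely for illustration.
reg = Register.square(2, spacing=6.0, prefix="q")
seq = Sequence(reg, MockDevice)
seq.declare_channel("rydberg", "rydberg_global")
seq.add(Pulse.ConstantPulse(500, 4.0, 0.0, 0.0), "rydberg")

# Observables must now be chosen explicitly when building a config by hand.
config = MPSConfig(observables=[BitStrings(evaluation_times=[1.0])], dt=10)
results = MPSBackend(seq, config=config).run()
```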
emu_mps/mps_backend_impl.py
CHANGED
```diff
@@ -6,6 +6,7 @@ import random
 import time
 import typing
 import uuid
+import logging

 from copy import deepcopy
 from collections import Counter
@@ -14,10 +15,9 @@ from types import MethodType
 from typing import Any, Optional

 import torch
-from pulser import Sequence
 from pulser.backend import EmulationConfig, Observable, Results, State

-from emu_base import DEVICE_COUNT,
+from emu_base import DEVICE_COUNT, SequenceData, get_max_rss
 from emu_base.math.brents_root_finding import BrentsRootFinder
 from emu_base.utils import deallocate_tensor

@@ -72,7 +72,7 @@ class Statistics(Observable):
         duration = self.data[-1]
         max_mem = get_max_rss(state.factors[0].is_cuda)

-
+        logging.getLogger("emulators").info(
             f"step = {len(self.data)}/{self.timestep_count}, "
             + f"χ = {state.get_max_bond_dim()}, "
             + f"|ψ| = {state.get_memory_footprint():.3f} MB, "
@@ -107,7 +107,7 @@ class MPSBackendImpl:
     target_time: float
     results: Results

-    def __init__(self, mps_config: MPSConfig, pulser_data:
+    def __init__(self, mps_config: MPSConfig, pulser_data: SequenceData):
         self.config = mps_config
         self.target_times = pulser_data.target_times
         self.target_time = self.target_times[1]
@@ -118,7 +118,7 @@ class MPSBackendImpl:
         self.delta = pulser_data.delta
         self.phi = pulser_data.phi
         self.timestep_count: int = self.omega.shape[0]
-        self.has_lindblad_noise = pulser_data.
+        self.has_lindblad_noise = len(pulser_data.lindblad_ops) > 0
         self.eigenstates = pulser_data.eigenstates
         self.dim = pulser_data.dim
         self.lindblad_noise = torch.zeros(self.dim, self.dim, dtype=dtype)
@@ -146,7 +146,7 @@ class MPSBackendImpl:
             atom_order=optimat.permute_tuple(
                 pulser_data.qubit_ids, self.qubit_permutation
             ),
-            total_duration=self.target_times[-1],
+            total_duration=int(self.target_times[-1]),
         )
         self.statistics = Statistics(
             evaluation_times=[t / self.target_times[-1] for t in self.target_times],
@@ -154,7 +154,7 @@ class MPSBackendImpl:
             timestep_count=self.timestep_count,
         )
         self.autosave_file = self._get_autosave_filepath(self.config.autosave_prefix)
-
+        logging.getLogger("emulators").debug(
             f"""Will save simulation state to file "{self.autosave_file.name}"
             every {self.config.autosave_dt} seconds.\n"""
             f"""To resume: `MPSBackend().resume("{self.autosave_file}")`"""
@@ -165,7 +165,7 @@ class MPSBackendImpl:
         if requested_num_gpus is None:
             requested_num_gpus = DEVICE_COUNT
         elif requested_num_gpus > DEVICE_COUNT:
-
+            logging.getLogger("emulators").warning(
                 f"Requested to use {requested_num_gpus} GPU(s) "
                 f"but only {DEVICE_COUNT if DEVICE_COUNT > 0 else 'cpu'} available"
             )
@@ -193,8 +193,8 @@ class MPSBackendImpl:

     def init_dark_qubits(self) -> None:
         # has_state_preparation_error
-        if self.
-            bad_atoms = self.pulser_data.
+        if self.pulser_data.noise_model.state_prep_error > 0.0:
+            bad_atoms = self.pulser_data.bad_atoms
             self.well_prepared_qubits_filter = torch.logical_not(
                 torch.tensor(list(bool(x) for x in bad_atoms.values()))
             )
@@ -500,59 +500,80 @@ class MPSBackendImpl:

         self.last_save_time = time.time()

-
+        logging.getLogger("emulators").debug(
             f"Saved simulation state in file {self.autosave_file} ({autosave_filesize}MB)"
         )

+    def _is_evaluation_time(
+        self,
+        observable: Observable,
+        t: float,
+        tolerance: float = 1e-10,
+    ) -> bool:
+        """Return True if ``t`` is a genuine sampling time for this observable.
+
+        Filters out nearby points that are close to, but not in, the
+        observable's evaluation times (within ``tolerance``).
+        Prevent false matches by using Pulser's tolerance
+        tol = 0.5 / total_duration. (deep inside pulser Observable class)
+        """
+        times = observable.evaluation_times
+
+        is_observable_eval_time = (
+            times is not None
+            and self.config.is_time_in_evaluation_times(t, times, tol=tolerance)
+        )
+
+        is_default_eval_time = self.config.is_evaluation_time(t, tol=tolerance)
+
+        return is_observable_eval_time or is_default_eval_time
+
     def fill_results(self) -> None:
         normalized_state = 1 / self.state.norm() * self.state

-        current_time_int: int = round(self.current_time)
         fractional_time = self.current_time / self.target_times[-1]
-        assert abs(self.current_time - current_time_int) < 1e-10

-
-
-
-
-
-
-            self.hamiltonian,
-            self.results,
-        )
+        callbacks_for_current_time_step = [
+            callback
+            for callback in self.config.observables
+            if self._is_evaluation_time(callback, fractional_time)
+        ]
+        if not callbacks_for_current_time_step:
             return

-
-
-
-
-
-
-
+        if self.well_prepared_qubits_filter is None:
+            state = normalized_state
+            hamiltonian = self.hamiltonian
+        else:
+            # Only do this potentially expensive step once and when needed.
+            full_mpo = MPO(
+                extended_mpo_factors(
+                    self.hamiltonian.factors, self.well_prepared_qubits_filter
                 )
-            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            )
+            full_state = MPS(
+                extended_mps_factors(
+                    normalized_state.factors,
+                    self.well_prepared_qubits_filter,
+                ),
+                num_gpus_to_use=None,  # Keep the already assigned devices.
+                orthogonality_center=get_extended_site_index(
+                    self.well_prepared_qubits_filter,
+                    normalized_state.orthogonality_center,
+                ),
+                eigenstates=normalized_state.eigenstates,
+            )
+            state = full_state
+            hamiltonian = full_mpo
+
+        for callback in callbacks_for_current_time_step:
+            callback(
+                self.config,
+                fractional_time,
+                state,
+                hamiltonian,
+                self.results,
+            )

     def permute_results(self, results: Results, permute: bool) -> Results:
         if permute:
@@ -608,12 +629,12 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
     norm_gap_before_jump: float
     root_finder: Optional[BrentsRootFinder]

-    def __init__(self, config: MPSConfig, pulser_data:
+    def __init__(self, config: MPSConfig, pulser_data: SequenceData):
         super().__init__(config, pulser_data)
         self.lindblad_ops = pulser_data.lindblad_ops
         self.root_finder = None

-        assert self.
+        assert self.lindblad_ops

     def init_lindblad_noise(self) -> None:
         stacked = torch.stack(self.lindblad_ops)
@@ -685,7 +706,7 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
         assert math.isclose(norm_after_normalizing, 1, abs_tol=1e-10)
         self.set_jump_threshold(norm_after_normalizing**2)

-    def
+    def remove_noise_from_hamiltonian(self) -> None:
         # Remove the noise from self.hamiltonian for the callbacks.
         # Since update_H is called at the start of do_time_step this is safe.
         update_H(
@@ -696,22 +717,24 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
             noise=torch.zeros(self.dim, self.dim, dtype=dtype),  # no noise
         )

-
+    def timestep_complete(self) -> None:
+        self.remove_noise_from_hamiltonian()
+        super().timestep_complete()


 class DMRGBackendImpl(MPSBackendImpl):
     def __init__(
         self,
         mps_config: MPSConfig,
-        pulser_data:
+        pulser_data: SequenceData,
         energy_tolerance: float = 1e-5,
         max_sweeps: int = 2000,
     ):

-        if
+        if pulser_data.noise_model.noise_types != ():
             raise NotImplementedError(
                 "DMRG solver does not currently support noise types"
-                f"you are using: {
+                f"you are using: {pulser_data.noise_model.noise_types}"
             )
         super().__init__(mps_config, pulser_data)
         self.previous_energy: Optional[float] = None
@@ -810,11 +833,10 @@ class DMRGBackendImpl(MPSBackendImpl):
         self.current_energy = None


-def create_impl(
-    pulser_data = PulserData(sequence=sequence, config=config, dt=config.dt)
+def create_impl(data: SequenceData, config: MPSConfig) -> MPSBackendImpl:

-    if
-        return NoisyMPSBackendImpl(config,
+    if data.lindblad_ops:
+        return NoisyMPSBackendImpl(config, data)
     if config.solver == Solver.DMRG:
-        return DMRGBackendImpl(config,
-        return MPSBackendImpl(config,
+        return DMRGBackendImpl(config, data)
+    return MPSBackendImpl(config, data)
```
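The key behavioural change above is that `fill_results` now selects callbacks per time step via the new `_is_evaluation_time` helper. Below is a standalone sketch of that selection logic; the names, tolerance handling, and attribute access are illustrative stand-ins, not the emu-mps internals themselves.

```python
from typing import Iterable, Optional, Sequence


def matches_any(t: float, times: Sequence[float], tol: float) -> bool:
    """True if t is within tol of some entry of times."""
    return any(abs(t - s) <= tol for s in times)


def select_callbacks(
    observables: Iterable,
    t: float,
    default_times: Sequence[float],
    tol: float = 1e-10,
) -> list:
    """Mimics the diff: a callback fires if t is one of its own evaluation
    times, or one of the config-wide default evaluation times."""
    selected = []
    for obs in observables:
        obs_times: Optional[Sequence[float]] = getattr(obs, "evaluation_times", None)
        own_match = obs_times is not None and matches_any(t, obs_times, tol)
        default_match = matches_any(t, default_times, tol)
        if own_match or default_match:
            selected.append(obs)
    return selected
```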
emu_mps/mps_config.py
CHANGED
````diff
@@ -20,7 +20,6 @@ from pulser.backend import (
     Energy,
     EnergySecondMoment,
     EnergyVariance,
-    BitStrings,
     EmulationConfig,
 )
 import logging
@@ -77,11 +76,13 @@ class MPSConfig(EmulationConfig):
         kwargs: Arguments that are passed to the base class

     Examples:
-
-
-
-
-
+        ```python
+        num_gpus_to_use = 2 #use 2 gpus if available, otherwise 1 or cpu
+        dt = 1.0 #this will impact the runtime
+        precision = 1e-6 #smaller dt requires better precision, generally
+        MPSConfig(num_gpus_to_use=num_gpus_to_use, dt=dt, precision=precision,
+            with_modulation=True) #the last arg is taken from the base class
+        ```
     """

     # Whether to warn if unexpected kwargs are received
@@ -92,7 +93,7 @@ class MPSConfig(EmulationConfig):
     def __init__(
         self,
         *,
-        dt:
+        dt: float = 10.0,
         precision: float = DEFAULT_PRECISION,
         max_bond_dim: int = DEFAULT_MAX_BOND_DIM,
         max_krylov_dim: int = 100,
@@ -103,11 +104,10 @@ class MPSConfig(EmulationConfig):
         log_level: int = logging.INFO,
         log_file: pathlib.Path | None = None,
         autosave_prefix: str = "emu_mps_save_",
-        autosave_dt:
+        autosave_dt: float = float("inf"),  # disable autosave by default
         solver: Solver = Solver.TDVP,
         **kwargs: Any,
     ):
-        kwargs.setdefault("observables", [BitStrings(evaluation_times=[1.0])])
         super().__init__(
             dt=dt,
             precision=precision,
@@ -124,7 +124,7 @@ class MPSConfig(EmulationConfig):
             solver=solver,
             **kwargs,
         )
-
+        logger = init_logging(log_level, log_file)

         MIN_AUTOSAVE_DT = 10
         assert (
@@ -135,7 +135,7 @@ class MPSConfig(EmulationConfig):
         prod_tol = precision * extra_krylov_tolerance
         if prod_tol < MIN_KRYLOV_TOL:
             new_extra_krylov_tolerance = MIN_KRYLOV_TOL / precision
-
+            logger.warning(
                 f"Requested Lanczos convergence tolerance "
                 f"(precision * extra_krylov_tolerance = {prod_tol:.2e}) "
                 f"is below minimum threshold {MIN_KRYLOV_TOL:.2e}. "
@@ -143,9 +143,9 @@ class MPSConfig(EmulationConfig):
                 f"{extra_krylov_tolerance:.2e} to {new_extra_krylov_tolerance:.2e} "
                 f"to maintain numerical stability."
             )
-            self.extra_krylov_tolerance = new_extra_krylov_tolerance
         else:
-
+            new_extra_krylov_tolerance = extra_krylov_tolerance
+        self._backend_options["extra_krylov_tolerance"] = new_extra_krylov_tolerance

         self.monkeypatch_observables()

@@ -153,7 +153,7 @@ class MPSConfig(EmulationConfig):
             self.noise_model.samples_per_run != 1
             and self.noise_model.samples_per_run is not None
         ):
-
+            logger.warning(
                 "Warning: The runs and samples_per_run values of the NoiseModel are ignored!"
             )
             self._backend_options[
@@ -202,7 +202,7 @@ class MPSConfig(EmulationConfig):
                 energy_mps_impl, obs_copy
             )
             obs_list.append(obs_copy)
-        self.observables = tuple(obs_list)
+        self._backend_options["observables"] = tuple(obs_list)

     def check_permutable_observables(self) -> bool:
         allowed_permutable_obs = set(
@@ -220,7 +220,7 @@ class MPSConfig(EmulationConfig):
         actual_obs = set([obs._base_tag for obs in self.observables])
         not_allowed = actual_obs.difference(allowed_permutable_obs)
         if not_allowed:
-
+            logging.getLogger("emulators").warning(
                 f"emu-mps allows only {allowed_permutable_obs} observables with"
                 " `optimize_qubit_ordering = True`."
                 f" you provided unsupported {not_allowed}"
````
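With this release, `MPSConfig` no longer injects a default `BitStrings` observable and autosave is disabled by default (`autosave_dt = float("inf")`), so a hand-built config has to name its observables explicitly. A hedged sketch with illustrative values; the observable classes come from `pulser.backend`, as imported in the diff above.

```python
from pulser.backend import BitStrings, Energy
from emu_mps import MPSConfig

config = MPSConfig(
    observables=[
        BitStrings(evaluation_times=[1.0]),   # sample bitstrings at the end
        Energy(evaluation_times=[0.5, 1.0]),  # energy halfway and at the end
    ],
    dt=10,              # matches the default of 10.0 shown in the diff
    precision=1e-6,     # SVD truncation threshold (see the MPS docstring above)
    max_bond_dim=1024,
    autosave_dt=600,    # opt back into autosave, every 600 seconds (minimum is 10)
)
```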
emu_mps/solver.py
CHANGED
```diff
@@ -2,5 +2,16 @@ from enum import Enum


 class Solver(str, Enum):
+    """Available MPS solvers used by emu-mps. Use these values to
+    select the algorithm for time evolution.
+    By default TDVP is used. In order to use DMRG, set the
+    `solver` argument of `MPSConfig` to "dmrg" or `Solver.DMRG`.
+
+    Args:
+
+    - Solver.TDVP: Time-Dependent Variational Principle solver.
+    - Solver.DMRG: Density Matrix Renormalization Group solver.
+    """
+
     TDVP = "tdvp"
     DMRG = "dmrg"
```
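As the new docstring notes, TDVP remains the default and DMRG is selected through `MPSConfig`. A minimal sketch; the `emu_mps.solver` import path follows the file shown in this diff, and the observable is only there so the config is complete.

```python
from pulser.backend import Energy
from emu_mps import MPSConfig
from emu_mps.solver import Solver

config = MPSConfig(
    observables=[Energy(evaluation_times=[1.0])],
    solver=Solver.DMRG,  # equivalently solver="dmrg", since Solver is a str-based Enum
)
```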
{emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-mps
-Version: 2.
+Version: 2.7.0
 Summary: Pasqal MPS based pulse emulator built on PyTorch
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==2.
+Requires-Dist: emu-base==2.7.0
 Description-Content-Type: text/markdown

 <div align="center">
```
{emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/RECORD
CHANGED
```diff
@@ -1,19 +1,19 @@
-emu_mps/__init__.py,sha256=
+emu_mps/__init__.py,sha256=mJrKjY6IyvYEI0h5KlmMGUwnkWKBa3VphrqFIOJZkiY,708
 emu_mps/algebra.py,sha256=VZ5uaX5PYWGqDCpRKKr819BLMMtT0pKDxc2HrlLCdgU,5423
 emu_mps/custom_callback_implementations.py,sha256=WeczmO6qkvBIipvXLqX45i3D7M4ovOrepusIGs6d2Ts,2420
 emu_mps/hamiltonian.py,sha256=OM0bPNZV7J5Egk6aTUwt4GaWqiUOP68ujZBTuqlBY1k,16289
-emu_mps/mpo.py,sha256=
-emu_mps/mps.py,sha256=
-emu_mps/mps_backend.py,sha256=
-emu_mps/mps_backend_impl.py,sha256=
-emu_mps/mps_config.py,sha256=
+emu_mps/mpo.py,sha256=ZNwfTXhyBJ7UCSoMbNT2iqelINlHOtQrfzgZGEiz6Vc,10402
+emu_mps/mps.py,sha256=zlke09Lvk2IHJOPYdn2zYAiPWKCDw0mU8t80KwyzIc0,22889
+emu_mps/mps_backend.py,sha256=UZOtHBlpU5VkE8C51VgVEaHVAnLT_TCndLQvApZZeZ4,2566
+emu_mps/mps_backend_impl.py,sha256=cFFQQb4gyLRcv9kDmpQXXXtmKqV_QlLqrfmL0AaP0_k,31227
+emu_mps/mps_config.py,sha256=ZwEXWHUkrUSUIPTWUCPqEMQBzQOufuX_HO9t_Cm93mQ,9182
 emu_mps/observables.py,sha256=4C_ewkd3YkJP0xghTrGUTgXUGvJRCQcetb8cU0SjMl0,1900
-emu_mps/solver.py,sha256=
+emu_mps/solver.py,sha256=RpfNZf0ezl8boSWIwSQUJiDkqRLKy3vrlutza8cEcRI,470
 emu_mps/solver_utils.py,sha256=Q1SY8E3Kipe_RfKE8lAMRfD4mSG2VkkkPmk-fU7eAgY,8852
 emu_mps/utils.py,sha256=rL75H55hB5lDMjy8a_O2PpJq51iZKjSx91X4euxB3mY,7293
 emu_mps/optimatrix/__init__.py,sha256=fBXQ7-rgDro4hcaBijCGhx3J69W96qcw5_3mWc7tND4,364
 emu_mps/optimatrix/optimiser.py,sha256=k9suYmKLKlaZ7ozFuIqvXHyCBoCtGgkX1mpen9GOdOo,6977
 emu_mps/optimatrix/permutations.py,sha256=9DDMZtrGGZ01b9F3GkzHR3paX4qNtZiPoI7Z_Kia3Lc,3727
-emu_mps-2.
-emu_mps-2.
-emu_mps-2.
+emu_mps-2.7.0.dist-info/METADATA,sha256=070Tn1jQRcwee0V3gNfoz49ni9EtL4eFfgBWSG5LyKI,3587
+emu_mps-2.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+emu_mps-2.7.0.dist-info/RECORD,,
```
{emu_mps-2.6.0.dist-info → emu_mps-2.7.0.dist-info}/WHEEL
File without changes