emu-base 1.2.7__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_base/__init__.py +3 -33
- emu_base/aggregators.py +158 -0
- emu_base/pulser_adapter.py +17 -11
- {emu_base-1.2.7.dist-info → emu_base-2.0.0.dist-info}/METADATA +2 -2
- emu_base-2.0.0.dist-info/RECORD +12 -0
- emu_base/base_classes/__init__.py +0 -31
- emu_base/base_classes/aggregators.py +0 -64
- emu_base/base_classes/backend.py +0 -48
- emu_base/base_classes/callback.py +0 -90
- emu_base/base_classes/config.py +0 -106
- emu_base/base_classes/default_callbacks.py +0 -300
- emu_base/base_classes/operator.py +0 -126
- emu_base/base_classes/results.py +0 -183
- emu_base/base_classes/state.py +0 -97
- emu_base-1.2.7.dist-info/RECORD +0 -20
- {emu_base-1.2.7.dist-info → emu_base-2.0.0.dist-info}/WHEEL +0 -0
emu_base/__init__.py
CHANGED
@@ -1,43 +1,13 @@
-from .base_classes.results import Results
-from .base_classes.callback import Callback, AggregationType
-from .base_classes.config import BackendConfig
-from .base_classes.operator import Operator
-from .base_classes.state import State
-from .base_classes.backend import Backend
-from .base_classes.default_callbacks import (
-    BitStrings,
-    CorrelationMatrix,
-    Energy,
-    EnergyVariance,
-    Expectation,
-    Fidelity,
-    QubitDensity,
-    StateResult,
-    SecondMomentOfEnergy,
-)
 from .constants import DEVICE_COUNT
 from .pulser_adapter import PulserData, HamiltonianType
 from .math.brents_root_finding import find_root_brents
 from .math.krylov_exp import krylov_exp, DEFAULT_MAX_KRYLOV_DIM
+from .aggregators import AggregationType, aggregate

 __all__ = [
     "__version__",
-    "Results",
-    "BackendConfig",
-    "Operator",
-    "State",
-    "Backend",
     "AggregationType",
-    "
-    "StateResult",
-    "BitStrings",
-    "QubitDensity",
-    "CorrelationMatrix",
-    "Expectation",
-    "Fidelity",
-    "Energy",
-    "EnergyVariance",
-    "SecondMomentOfEnergy",
+    "aggregate",
     "PulserData",
     "find_root_brents",
     "krylov_exp",
@@ -46,4 +16,4 @@ __all__ = [
     "DEVICE_COUNT",
 ]

-__version__ = "1.2.7"
+__version__ = "2.0.0"
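Note on the changed public surface: the 1.2.7 re-exports of the in-package base classes (Results, BackendConfig, Operator, State, Backend and the default callbacks) are gone; 2.0.0 only re-exports the new aggregation helpers next to the existing adapter and math utilities. A minimal import sketch of the 2.0.0 surface follows; the `pulser.backend` location for `Results` is taken from the aggregators.py import below, not independently verified.

# Sketch only: names exposed by emu_base 2.0.0's __init__ as shown above.
from emu_base import AggregationType, aggregate, PulserData, HamiltonianType

# Results now comes from pulser-core rather than emu_base.base_classes
# (assumption based on the `from pulser.backend import Results` import below).
from pulser.backend import Results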
emu_base/aggregators.py
ADDED
@@ -0,0 +1,158 @@
+import statistics
+import torch
+from typing import Any, Callable
+import collections
+from enum import Enum, auto
+from pulser.backend import (
+    Results,
+)
+import logging
+
+
+_NUMERIC_TYPES = {int, float, complex}
+
+
+class AggregationType(Enum):
+    """
+    Defines how to combine multiple values from different simulation results.
+    """
+
+    MEAN = auto()  # statistics.fmean or list/matrix-wise equivalent
+    BAG_UNION = auto()  # Counter.__add__
+
+
+def mean_aggregator(
+    values: list[Any],
+) -> (
+    complex
+    | float
+    | list[complex]
+    | list[float]
+    | list[list[complex]]
+    | list[list[float]]
+    | torch.Tensor
+):  # FIXME: support tuples?
+    if values == []:
+        raise ValueError("Cannot average 0 samples")
+
+    element_type = type(values[0])
+
+    if element_type in _NUMERIC_TYPES:
+        return statistics.fmean(values)
+
+    if element_type == torch.Tensor:
+        acc = torch.zeros_like(values[0])
+        for ten in values:
+            acc += ten
+        return acc / len(values)
+
+    if element_type != list:
+        raise NotImplementedError("Cannot average this type of data")
+
+    if values[0] == []:
+        raise ValueError("Cannot average list of empty lists")
+
+    sub_element_type = type(values[0][0])
+
+    if sub_element_type in _NUMERIC_TYPES:
+        dim = len(values[0])
+        return [statistics.fmean(value[i] for value in values) for i in range(dim)]
+
+    if sub_element_type != list:  # FIXME: ABC.Iterable? Collection? subclass?
+        raise ValueError(f"Cannot average list of lists of {sub_element_type}")
+
+    if values[0][0] == []:
+        raise ValueError("Cannot average list of matrices with no columns")
+
+    if (sub_sub_element_type := type(values[0][0][0])) not in _NUMERIC_TYPES:
+        raise ValueError(f"Cannot average list of matrices of {sub_sub_element_type}")
+
+    dim1 = len(values[0])
+    dim2 = len(values[0][0])
+    return [
+        [statistics.fmean(value[i][j] for value in values) for j in range(dim2)]
+        for i in range(dim1)
+    ]
+
+
+def bag_union_aggregator(values: list[collections.Counter]) -> collections.Counter:
+    return sum(values, start=collections.Counter())
+
+
+aggregation_types_definitions: dict[AggregationType, Callable] = {
+    AggregationType.MEAN: mean_aggregator,
+    AggregationType.BAG_UNION: bag_union_aggregator,
+}
+
+
+def _get_aggregation_type(tag: str) -> AggregationType | None:
+    if tag.startswith("bitstrings"):
+        return AggregationType.BAG_UNION
+    if tag.startswith("expectation"):
+        return AggregationType.MEAN
+    if tag.startswith("fidelity"):
+        return AggregationType.MEAN
+    if tag.startswith("correlation_matrix"):
+        return AggregationType.MEAN
+    if tag.startswith("occupation"):
+        return AggregationType.MEAN
+    if tag.startswith("energy"):
+        return AggregationType.MEAN
+    if tag.startswith("energy_second_moment"):
+        return AggregationType.MEAN
+    else:
+        return None
+
+
+def aggregate(
+    results_to_aggregate: list[Results],
+    **aggregator_functions: Callable[[Any], Any],
+) -> Results:
+    if len(results_to_aggregate) == 0:
+        raise ValueError("no results to aggregate")
+    if len(results_to_aggregate) == 1:
+        return results_to_aggregate[0]
+    stored_callbacks = set(results_to_aggregate[0].get_result_tags())
+    if not all(
+        set(results.get_result_tags()) == stored_callbacks
+        for results in results_to_aggregate
+    ):
+        raise ValueError(
+            "Monte-Carlo results seem to provide from incompatible simulations: "
+            "they do not all contain the same observables"
+        )
+    aggregated = Results(
+        atom_order=results_to_aggregate[0].atom_order,
+        total_duration=results_to_aggregate[0].total_duration,
+    )
+    for tag in stored_callbacks:
+        aggregation_type = aggregator_functions.get(
+            tag,
+            _get_aggregation_type(tag),
+        )
+        if aggregation_type is None:
+            logging.getLogger("global_logger").warning(f"Skipping aggregation of `{tag}`")
+            continue
+        aggregation_function: Any = (
+            aggregation_type
+            if callable(aggregation_type)
+            else aggregation_types_definitions[aggregation_type]
+        )
+        evaluation_times = results_to_aggregate[0].get_result_times(tag)
+        if not all(
+            results.get_result_times(tag) == evaluation_times
+            for results in results_to_aggregate
+        ):
+            raise ValueError(
+                "Monte-Carlo results seem to provide from incompatible simulations: "
+                "the callbacks are not stored at the same times"
+            )
+
+        uuid = results_to_aggregate[0]._find_uuid(tag)
+        for t in results_to_aggregate[0].get_result_times(tag):
+            v = aggregation_function(
+                [result.get_result(tag, t) for result in results_to_aggregate]
+            )
+            aggregated._store_raw(uuid=uuid, tag=tag, time=t, value=v)
+
+    return aggregated
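For readers new to this module, here is a hedged usage sketch of the new `aggregate` entry point. It assumes you already hold several `pulser.backend.Results` objects produced by Monte-Carlo runs of the same sequence (obtaining those is backend-specific and not shown); the per-tag keyword override mirrors the `aggregator_functions.get(tag, ...)` lookup above, and `my_median` is a hypothetical custom aggregator, not part of the package.

# Sketch, not package code: combining Monte-Carlo runs with emu_base 2.0.0.
from emu_base import aggregate

# monte_carlo_runs: list of pulser.backend.Results, e.g. one per noise trajectory.
# Defaults: MEAN for energy/expectation-like tags, BAG_UNION for "bitstrings".
combined = aggregate(monte_carlo_runs)

# A custom aggregator can be passed per result tag; it receives the list of
# per-run values stored under that tag at one evaluation time.
def my_median(values):  # hypothetical helper
    values = sorted(values)
    return values[len(values) // 2]

combined = aggregate(monte_carlo_runs, energy=my_median)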
emu_base/pulser_adapter.py
CHANGED
@@ -1,13 +1,13 @@
 import pulser
-from typing import Tuple
+from typing import Tuple, Sequence
 import torch
 import math
 from pulser.noise_model import NoiseModel
+from pulser.register.base_register import BaseRegister
 from enum import Enum

-from pulser.
+from pulser.backend.config import EmulationConfig

-from emu_base.base_classes.config import BackendConfig
 from emu_base.lindblad_operators import get_lindblad_operators
 from emu_base.utils import dist2, dist3

@@ -229,15 +229,23 @@ class PulserData:
     hamiltonian_type: HamiltonianType
     lindblad_ops: list[torch.Tensor]

-    def __init__(self, *, sequence: pulser.Sequence, config:
+    def __init__(self, *, sequence: pulser.Sequence, config: EmulationConfig, dt: int):
         self.qubit_count = len(sequence.register.qubit_ids)
-
+        sequence_duration = sequence.get_duration()
         # the end value is exclusive, so add +1
         observable_times = set(torch.arange(0, sequence.get_duration() + 1, dt).tolist())
         observable_times.add(sequence.get_duration())
-        for obs in config.
-
-
+        for obs in config.observables:
+            times: Sequence[float]
+            if obs.evaluation_times is not None:
+                times = obs.evaluation_times
+            elif config.default_evaluation_times != "Full":
+                times = (
+                    config.default_evaluation_times.tolist()  # type: ignore[union-attr,assignment]
+                )
+            observable_times |= set([round(time * sequence_duration) for time in times])
+
+        self.target_times: list[int] = list(observable_times)
         self.target_times.sort()

         laser_waist = (
@@ -266,9 +274,7 @@ class PulserData:
                "the interaction matrix"
            )

-            self.full_interaction_matrix =
-                config.interaction_matrix, dtype=torch.float64
-            )
+            self.full_interaction_matrix = config.interaction_matrix.as_tensor()
         elif self.hamiltonian_type == HamiltonianType.Rydberg:
             self.full_interaction_matrix = _rydberg_interaction(sequence)
         elif self.hamiltonian_type == HamiltonianType.XY:
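To make the new time handling concrete: `PulserData.__init__` now takes a `pulser.backend.config.EmulationConfig` and merges three sources of evaluation times — the `dt` grid, the end of the sequence, and each observable's fractional evaluation times scaled by the sequence duration. Below is a small worked sketch of that rounding logic in isolation (plain Python rather than the class or torch; the 4000 ns duration, 300 ns step and the 0.1/0.5/1.0 fractions are made-up illustration values):

# Sketch of the target-time computation performed in PulserData.__init__ above.
sequence_duration = 4000  # ns, assumed for illustration
dt = 300                  # ns time step, assumed for illustration

observable_times = set(range(0, sequence_duration + 1, dt))  # 0, 300, ..., 3900
observable_times.add(sequence_duration)                      # always keep the final time

# One observable with fractional evaluation times (fractions of total duration):
evaluation_times = [0.1, 0.5, 1.0]
observable_times |= {round(t * sequence_duration) for t in evaluation_times}  # adds 400, 2000

target_times = sorted(observable_times)  # the dt grid plus 400, 2000 and 4000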
{emu_base-1.2.7.dist-info → emu_base-2.0.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-base
-Version: 1.2.7
+Version: 2.0.0
 Summary: Pasqal base classes for emulators
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: pulser-core==1.
+Requires-Dist: pulser-core==1.4.*
 Requires-Dist: torch==2.5.0
 Description-Content-Type: text/markdown

emu_base-2.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
+emu_base/__init__.py,sha256=b60wKpJR1-oUIEv68t0-WNza2IXSL7joPQVt5Hw-rj8,493
+emu_base/aggregators.py,sha256=bB-rldoDAErxQMpL715K5lpiabGOpkCY0GyxW7mfHuc,5000
+emu_base/constants.py,sha256=41LYkKLUCz-oxPbd-j7nUDZuhIbUrnez6prT0uR0jcE,56
+emu_base/lindblad_operators.py,sha256=Nsl1YrWb8IDM9Z50ucy2Ed44p_IRETnlbr6qaqAgV50,1629
+emu_base/pulser_adapter.py,sha256=dRD80z_dVXkCjDBLRIkmqNGg5M78VEKkQuk3H5JdZSM,11241
+emu_base/utils.py,sha256=RM8O0qfPAJfcdqqAojwEEKV7I3ZfVDklnTisTGhUg5k,233
+emu_base/math/__init__.py,sha256=6BbIytYV5uC-e5jLMtIErkcUl_PvfSNnhmVFY9Il8uQ,97
+emu_base/math/brents_root_finding.py,sha256=AVx6L1Il6rpPJWrLJ7cn6oNmJyZOPRgEaaZaubC9lsU,3711
+emu_base/math/krylov_exp.py,sha256=UCFNeq-j2ukgBsOPC9_Jiv1aqpy88SrslDLiCxIGBwk,3840
+emu_base-2.0.0.dist-info/METADATA,sha256=uoylMuopYijyAJ9G8iY_cxXanQlJGu1ibvkd17Soi2g,3522
+emu_base-2.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_base-2.0.0.dist-info/RECORD,,
emu_base/base_classes/__init__.py
DELETED
@@ -1,31 +0,0 @@
-from .operator import Operator
-from .state import State
-from .results import Results
-from .callback import Callback
-from .default_callbacks import (
-    StateResult,
-    BitStrings,
-    QubitDensity,
-    CorrelationMatrix,
-    Expectation,
-    Fidelity,
-    Energy,
-    EnergyVariance,
-    SecondMomentOfEnergy,
-)
-
-__all__ = [
-    "Operator",
-    "State",
-    "Results",
-    "Callback",
-    "StateResult",
-    "BitStrings",
-    "QubitDensity",
-    "CorrelationMatrix",
-    "Expectation",
-    "Fidelity",
-    "Energy",
-    "EnergyVariance",
-    "SecondMomentOfEnergy",
-]
emu_base/base_classes/aggregators.py
DELETED
@@ -1,64 +0,0 @@
-import statistics
-from typing import Any, Callable
-import collections
-from emu_base.base_classes.callback import AggregationType
-
-
-_NUMERIC_TYPES = {int, float, complex}
-
-
-def mean_aggregator(
-    values: list[Any],
-) -> (
-    complex
-    | float
-    | list[complex]
-    | list[float]
-    | list[list[complex]]
-    | list[list[float]]
-):  # FIXME: support tuples?
-    if values == []:
-        raise ValueError("Cannot average 0 samples")
-
-    element_type = type(values[0])
-
-    if element_type in _NUMERIC_TYPES:
-        return statistics.fmean(values)
-
-    if element_type != list:
-        raise NotImplementedError("Cannot average this type of data")
-
-    if values[0] == []:
-        raise ValueError("Cannot average list of empty lists")
-
-    sub_element_type = type(values[0][0])
-
-    if sub_element_type in _NUMERIC_TYPES:
-        dim = len(values[0])
-        return [statistics.fmean(value[i] for value in values) for i in range(dim)]
-
-    if sub_element_type != list:  # FIXME: ABC.Iterable? Collection? subclass?
-        raise ValueError(f"Cannot average list of lists of {sub_element_type}")
-
-    if values[0][0] == []:
-        raise ValueError("Cannot average list of matrices with no columns")
-
-    if (sub_sub_element_type := type(values[0][0][0])) not in _NUMERIC_TYPES:
-        raise ValueError(f"Cannot average list of matrices of {sub_sub_element_type}")
-
-    dim1 = len(values[0])
-    dim2 = len(values[0][0])
-    return [
-        [statistics.fmean(value[i][j] for value in values) for j in range(dim2)]
-        for i in range(dim1)
-    ]
-
-
-def bag_union_aggregator(values: list[collections.Counter]) -> collections.Counter:
-    return sum(values, start=collections.Counter())
-
-
-aggregation_types_definitions: dict[AggregationType, Callable] = {
-    AggregationType.MEAN: mean_aggregator,
-    AggregationType.BAG_UNION: bag_union_aggregator,
-}
emu_base/base_classes/backend.py
DELETED
@@ -1,48 +0,0 @@
-import warnings
-from abc import ABC, abstractmethod
-
-from pulser import Sequence
-
-from emu_base.base_classes.config import BackendConfig
-from emu_base.base_classes.results import Results
-
-
-class Backend(ABC):
-    """
-    Base class for different emulation backends.
-    Forces backends to implement a run method.
-    """
-
-    @staticmethod
-    def validate_sequence(sequence: Sequence) -> None:
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore", category=DeprecationWarning)
-
-            if not isinstance(sequence, Sequence):
-                raise TypeError(
-                    "The provided sequence has to be a valid " "pulser.Sequence instance."
-                )
-            if sequence.is_parametrized() or sequence.is_register_mappable():
-                raise ValueError(
-                    "Not supported"
-                    "The provided sequence needs to be built to be simulated. Call"
-                    " `Sequence.build()` with the necessary parameters."
-                )
-            if not sequence._schedule:
-                raise ValueError("The provided sequence has no declared channels.")
-            if all(sequence._schedule[x][-1].tf == 0 for x in sequence.declared_channels):
-                raise ValueError("No instructions given for the channels in the sequence.")
-
-    @abstractmethod
-    def run(self, sequence: Sequence, config: BackendConfig) -> Results:
-        """
-        Emulates the given sequence.
-
-        Args:
-            sequence: a Pulser sequence to simulate
-            config: the config. Should be of the appropriate type for the backend
-
-        Returns:
-            the simulation results
-        """
-        pass
emu_base/base_classes/callback.py
DELETED
@@ -1,90 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any, Optional, TYPE_CHECKING
-from enum import Enum, auto
-
-from emu_base.base_classes.config import BackendConfig
-from emu_base.base_classes.operator import Operator
-from emu_base.base_classes.state import State
-
-if TYPE_CHECKING:
-    from emu_base.base_classes.results import Results
-
-
-class AggregationType(Enum):
-    """
-    Defines how to combine multiple values from different simulation results.
-    """
-
-    MEAN = auto()  # statistics.fmean or list/matrix-wise equivalent
-    BAG_UNION = auto()  # Counter.__add__
-
-
-class Callback(ABC):
-    def __init__(self, evaluation_times: set[int]):
-        """
-        The callback base class that can be subclassed to add new kinds of results
-        to the Results object returned by the Backend
-
-        Args:
-            evaluation_times: the times at which to add a result to Results
-        """
-        self.evaluation_times = evaluation_times
-
-    def __call__(
-        self, config: BackendConfig, t: int, state: State, H: Operator, result: "Results"
-    ) -> None:
-        """
-        This function is called after each time step performed by the emulator.
-        By default it calls apply to compute a result and put it in `result`
-        if `t` in `self.evaluation_times`.
-        It can be overloaded to define any custom behaviour for a `Callback`.
-
-        Args:
-            config: the config object passed to the run method
-            t: the current time in ns
-            state: the current state
-            H: the Hamiltonian at this time
-            result: the results object
-        """
-        if t in self.evaluation_times:
-            value_to_store = self.apply(config, t, state, H)
-            result.store(callback=self, time=t, value=value_to_store)
-
-    @property
-    @abstractmethod
-    def name(self) -> str:
-        """
-        The name of the observable, can be used to index into the Results object.
-        Some Callbacks might have multiple instances, such as a callback to compute
-        a fidelity on some given state. In that case, this method could make sure
-        each instance has a unique name.
-
-        Returns:
-            the name of the callback
-        """
-        pass
-
-    @abstractmethod
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        """
-        This method must be implemented by subclasses. The result of this method
-        gets put in the Results object.
-
-        Args:
-            config: the config object passed to the run method
-            t: the current time in ns
-            state: the current state
-            H: the Hamiltonian at this time
-
-        Returns:
-            the result to put in Results
-        """
-        pass
-
-    @property
-    def default_aggregation_type(self) -> Optional[AggregationType]:
-        """
-        Defines how to combine by default multiple values from different simulation results.
-        None means no default, therefore aggregator function is always user-provided.
-        """
-        return None
emu_base/base_classes/config.py
DELETED
@@ -1,106 +0,0 @@
-from __future__ import annotations
-from pulser.noise_model import NoiseModel
-import logging
-import sys
-import pathlib
-from typing import TYPE_CHECKING
-import torch
-
-if TYPE_CHECKING:
-    from emu_base.base_classes.callback import Callback
-
-
-class BackendConfig:
-    """The base backend configuration.
-
-    Args:
-        observables: a list of callbacks to compute observables
-        with_modulation: if True, run the sequence with hardware modulation
-        noise_model: The pulser.NoiseModel to use in the simulation.
-        interaction_matrix: When specified, override the interaction terms in the Hamiltonian.
-            This corresponds to the $U_{ij}$ terms in the documentation. Must be symmetric.
-        interaction_cutoff: set interaction coefficients smaller than this to 0.
-            This can improve the memory profile of the application for some backends.
-        log_level: The output verbosity. Should be one of the constants from logging.
-        log_file: a path to a file where to store the log, instead of printing to stdout
-
-    Examples:
-        >>> observables = [BitStrings(400, 100)] #compute 100 bitstrings at 400ns
-        >>> noise_model = pulser.noise_model.NoiseModel()
-        >>> interaction_matrix = [[1 for _ in range(nqubits)] for _ in range(nqubits)]
-        >>> interaction_cutoff = 2.0 #this will turn off all the above interactions again
-        >>> log_level = logging.warn
-    """
-
-    def __init__(
-        self,
-        *,
-        observables: list[Callback] | None = None,
-        with_modulation: bool = False,
-        noise_model: NoiseModel | None = None,
-        interaction_matrix: list[list[float]] | None = None,
-        interaction_cutoff: float = 0.0,
-        log_level: int = logging.INFO,
-        log_file: pathlib.Path | None = None,
-    ):
-        if observables is None:
-            observables = []
-        self.callbacks = (
-            observables  # we can add other types of callbacks, and just stack them
-        )
-        self.with_modulation = with_modulation
-        self.noise_model = noise_model
-
-        if interaction_matrix is not None and not (
-            isinstance(interaction_matrix, list)
-            and isinstance(interaction_matrix[0], list)
-            and isinstance(interaction_matrix[0][0], float)
-        ):
-            raise ValueError(
-                "Interaction matrix must be provided as a Python list of lists of floats"
-            )
-
-        if interaction_matrix is not None:
-            int_mat = torch.tensor(interaction_matrix)
-            tol = 1e-10
-            if not (
-                int_mat.numel() != 0
-                and torch.all(torch.isreal(int_mat))
-                and int_mat.dim() == 2
-                and int_mat.shape[0] == int_mat.shape[1]
-                and torch.allclose(int_mat, int_mat.T, atol=tol)
-                and torch.norm(torch.diag(int_mat)) < tol
-            ):
-                raise ValueError("Interaction matrix is not symmetric and zero diag")
-
-        self.interaction_matrix = interaction_matrix
-        self.interaction_cutoff = interaction_cutoff
-        self.logger = logging.getLogger("global_logger")
-        self.log_file = log_file
-        self.log_level = log_level
-
-        self.init_logging()
-
-        if noise_model is not None and (
-            noise_model.runs != 1
-            or noise_model.samples_per_run != 1
-            or noise_model.runs is not None
-            or noise_model.samples_per_run is not None
-        ):
-            self.logger.warning(
-                "Warning: The runs and samples_per_run values of the NoiseModel are ignored!"
-            )
-
-    def init_logging(self) -> None:
-        if self.log_file is None:
-            logging.basicConfig(
-                level=self.log_level, format="%(message)s", stream=sys.stdout, force=True
-            )  # default to stream = sys.stderr
-        else:
-            logging.basicConfig(
-                level=self.log_level,
-                format="%(message)s",
-                filename=str(self.log_file),
-                filemode="w",
-                force=True,
-            )
emu_base/base_classes/default_callbacks.py
DELETED
@@ -1,300 +0,0 @@
-from copy import deepcopy
-from typing import Any
-
-from emu_base.base_classes.callback import Callback, AggregationType
-from emu_base.base_classes.config import BackendConfig
-from emu_base.base_classes.operator import Operator
-from emu_base.base_classes.state import State
-
-
-class StateResult(Callback):
-    """
-    Store the quantum state in whatever format the backend provides
-
-    Args:
-        evaluation_times: the times at which to store the state
-    """
-
-    def __init__(self, evaluation_times: set[int]):
-        super().__init__(evaluation_times)
-
-    name = "state"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        return deepcopy(state)
-
-
-class BitStrings(Callback):
-    """
-    Store bitstrings sampled from the current state. Error rates are taken from the config
-    passed to the run method of the backend. The bitstrings are stored as a Counter[str].
-
-    Args:
-        evaluation_times: the times at which to sample bitstrings
-        num_shots: how many bitstrings to sample each time this observable is computed
-    """
-
-    def __init__(self, evaluation_times: set[int], num_shots: int = 1000):
-        super().__init__(evaluation_times)
-        self.num_shots = num_shots
-
-    name = "bitstrings"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        p_false_pos = (
-            0.0 if config.noise_model is None else config.noise_model.p_false_pos
-        )
-        p_false_neg = (
-            0.0 if config.noise_model is None else config.noise_model.p_false_neg
-        )
-
-        return state.sample(self.num_shots, p_false_pos, p_false_neg)
-
-    default_aggregation_type = AggregationType.BAG_UNION
-
-
-_fidelity_counter = -1
-
-
-class Fidelity(Callback):
-    """
-    Store $<ψ|φ(t)>$ for the given state $|ψ>$,
-    and the state $|φ(t)>$ obtained by time evolution.
-
-    Args:
-        evaluation_times: the times at which to compute the fidelity
-        state: the state |ψ>. Note that this must be of appropriate type for the backend
-
-    Examples:
-        >>> state = State.from_state_string(...) #see State API
-        >>> fidelity = Fidelity([400], state) #measure fidelity on state at t=400ns
-    """
-
-    def __init__(self, evaluation_times: set[int], state: State):
-        super().__init__(evaluation_times)
-        global _fidelity_counter
-        _fidelity_counter += 1
-        self.index = _fidelity_counter
-        self.state = state
-
-    @property
-    def name(self) -> str:
-        return f"fidelity_{self.index}"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        return self.state.inner(state)
-
-
-_expectation_counter = -1
-
-
-class Expectation(Callback):
-    """
-    Store the expectation of the given operator on the current state
-    (i.e. $\\langle φ(t)|\\mathrm{operator}|φ(t)\\rangle$).
-
-    Args:
-        evaluation_times: the times at which to compute the expectation
-        operator: the operator to measure. Must be of appropriate type for the backend.
-
-    Examples:
-        >>> op = Operator.from_operator_string(...) #see Operator API
-        >>> expectation = Expectation([400], op) #measure the expecation of op at t=400ns
-    """
-
-    def __init__(self, evaluation_times: set[int], operator: Operator):
-        super().__init__(evaluation_times)
-        global _expectation_counter
-        _expectation_counter += 1
-        self.index = _expectation_counter
-        self.operator = operator
-
-    @property
-    def name(self) -> str:
-        return f"expectation_{self.index}"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        return self.operator.expect(state)
-
-    default_aggregation_type = AggregationType.MEAN
-
-
-class CorrelationMatrix(Callback):
-    """
-    Store the correlation matrix for the current state.
-    Requires specification of the basis used in the emulation
-    https://pulser.readthedocs.io/en/stable/conventions.html
-    It currently supports
-    - the rydberg basis ('r','g')
-    - the xy basis ('0', '1')
-    and returns
-
-    `[[<φ(t)|n_i n_j|φ(t)> for j in qubits] for i in qubits]`
-
-    n_i being the operator that projects qubit i onto the state that measures as 1.
-    The diagonal of this matrix is the QubitDensity. The correlation matrix
-    is stored as a list of lists.
-
-    Args:
-        evaluation_times: the times at which to compute the correlation matrix
-        basis: the basis used by the sequence
-        nqubits: the number of qubits in the Register
-
-    Notes:
-        See the API for `Operator.from_operator_string` for an example of what to do with
-        basis and nqubits.
-    """
-
-    def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
-        super().__init__(evaluation_times)
-        self.operators: list[list[Operator]] | None = None
-        self.basis = set(basis)
-        if self.basis == {"r", "g"}:
-            self.op_string = "rr"
-        elif self.basis == {"0", "1"}:
-            self.op_string = "11"
-        else:
-            raise ValueError("Unsupported basis provided")
-        self.nqubits = nqubits
-
-    name = "correlation_matrix"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        if hasattr(state, "get_correlation_matrix") and callable(
-            state.get_correlation_matrix
-        ):
-            return state.get_correlation_matrix()
-
-        if self.operators is None or not isinstance(self.operators[0], type(H)):
-            self.operators = [
-                [
-                    H.from_operator_string(
-                        self.basis,
-                        self.nqubits,
-                        [(1.0, [({self.op_string: 1.0}, list({i, j}))])],
-                    )
-                    for j in range(self.nqubits)
-                ]
-                for i in range(self.nqubits)
-            ]
-        return [[op.expect(state).real for op in ops] for ops in self.operators]
-
-    default_aggregation_type = AggregationType.MEAN
-
-
-class QubitDensity(Callback):
-    """
-    Requires specification of the basis used in the emulation
-    https://pulser.readthedocs.io/en/stable/conventions.html
-    It currently supports
-    - the rydberg basis ('r','g')
-    - the xy basis ('0', '1')
-    and returns
-
-    `[<φ(t)|n_i|φ(t)> for i in qubits]`
-
-    n_i being the operator that projects qubit i onto the state that measures as 1.
-    The qubit density is stored as a list.
-
-    Args:
-        evaluation_times: the times at which to compute the density
-        basis: the basis used by the sequence
-        nqubits: the number of qubits in the Register
-
-    Notes:
-        See the API for `State.from_state_string` for an example of what to do with
-        basis and nqubits.
-    """
-
-    def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
-        super().__init__(evaluation_times)
-        self.operators: list[Operator] | None = None
-        self.basis = set(basis)
-        if self.basis == {"r", "g"}:
-            self.op_string = "rr"
-        elif self.basis == {"0", "1"}:
-            self.op_string = "11"
-        else:
-            raise ValueError("Unsupported basis provided")
-        self.nqubits = nqubits
-
-    name = "qubit_density"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        if self.operators is None or not isinstance(self.operators[0], type(H)):
-            self.operators = [
-                H.from_operator_string(
-                    self.basis, self.nqubits, [(1.0, [({self.op_string: 1.0}, [i])])]
-                )
-                for i in range(self.nqubits)
-            ]
-        return [op.expect(state).real for op in self.operators]
-
-    default_aggregation_type = AggregationType.MEAN
-
-
-class Energy(Callback):
-    """
-    Store the expectation value of the current Hamiltonian
-    (i.e. $\\langle φ(t)|H(t)|φ(t) \\rangle$)
-
-    Args:
-        evaluation_times: the times at which to compute the expectation
-    """
-
-    def __init__(self, evaluation_times: set[int]):
-        super().__init__(evaluation_times)
-
-    name = "energy"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        return H.expect(state).real
-
-    default_aggregation_type = AggregationType.MEAN
-
-
-class EnergyVariance(Callback):
-    """
-    Store the variance of the current Hamiltonian
-    (i.e. $\\langle φ(t)|H(t)^2|φ(t)\\rangle - \\langle φ(t)|H(t)|φ(t)\\rangle^2$)
-
-    Args:
-        evaluation_times: the times at which to compute the variance
-    """
-
-    def __init__(self, evaluation_times: set[int]):
-        super().__init__(evaluation_times)
-
-    name = "energy_variance"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        h_squared = H @ H
-        return h_squared.expect(state).real - H.expect(state).real ** 2
-
-    # Explicitely setting this to None out of safety: in the case of MonteCarlo,
-    # the aggregated variance cannot be computed from this callback.
-    # Instead, one first need to average Energy and SecondMomentOfEnergy,
-    # and then compute the variance with the formula:
-    # AggregatedEnergyVariance = AveragedSecondMomentOfEnergy - AveragedEnergy**2
-    default_aggregation_type = None
-
-
-class SecondMomentOfEnergy(Callback):
-    """
-    Store the expectation value $\\langle φ(t)|H(t)^2|φ(t)\\rangle$.
-    Useful for computing the variance when averaging over many executions of the program.
-
-    Args:
-        evaluation_times: the times at which to compute the variance
-    """
-
-    def __init__(self, evaluation_times: set[int]):
-        super().__init__(evaluation_times)
-
-    name = "second_moment_of_energy"
-
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        h_squared = H @ H
-        return h_squared.expect(state).real
-
-    default_aggregation_type = AggregationType.MEAN
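For context on what this deletion removes: in 1.2.7, observables were configured by instantiating these Callback subclasses and passing them to `BackendConfig(observables=[...])` (see the deleted config.py above). A minimal sketch of that removed style, using only constructors shown in this file and names re-exported by the old emu_base __init__; the times, shot count and qubit count are arbitrary example values:

# 1.2.7-style observable setup (removed in 2.0.0); sketch based on the deleted classes above.
from emu_base import BitStrings, Energy, CorrelationMatrix, BackendConfig

observables = [
    BitStrings(evaluation_times={400}, num_shots=100),        # sample 100 bitstrings at t=400 ns
    Energy(evaluation_times={200, 400}),                      # <φ(t)|H(t)|φ(t)> at two times
    CorrelationMatrix(evaluation_times={400}, basis=("r", "g"), nqubits=3),
]
config = BackendConfig(observables=observables)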
emu_base/base_classes/operator.py
DELETED
@@ -1,126 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Iterable
-
-from emu_base.base_classes.state import State
-
-
-QuditOp = dict[str, complex]  # single qubit operator
-TensorOp = list[tuple[QuditOp, list[int]]]  # QuditOp applied to list of qubits
-FullOp = list[tuple[complex, TensorOp]]  # weighted sum of TensorOp
-
-
-class Operator(ABC):
-    @abstractmethod
-    def __mul__(self, other: State) -> State:
-        """
-        Apply the operator to a state
-
-        Args:
-            other: the state to apply this operator to
-
-        Returns:
-            the resulting state
-        """
-        pass
-
-    @abstractmethod
-    def __add__(self, other: Operator) -> Operator:
-        """
-        Computes the sum of two operators.
-
-        Args:
-            other: the other operator
-
-        Returns:
-            the summed operator
-        """
-        pass
-
-    @abstractmethod
-    def expect(self, state: State) -> float | complex:
-        """
-        Compute the expectation value of self on the given state.
-
-        Args:
-            state: the state with which to compute
-
-        Returns:
-            the expectation
-        """
-
-    @staticmethod
-    @abstractmethod
-    def from_operator_string(
-        basis: Iterable[str],
-        nqubits: int,
-        operations: FullOp,
-        operators: dict[str, QuditOp] = {},
-        /,
-        **kwargs: Any,
-    ) -> Operator:
-        """
-        Create an operator in the backend-specific format from the
-        pulser abstract representation
-        <https://www.notion.so/pasqal/Abstract-State-and-Operator-Definition>
-        By default it supports strings 'ij', where i and j in basis,
-        to denote |i><j|, but additional symbols can be defined in operators
-        For a list of existing bases, see
-        <https://pulser.readthedocs.io/en/stable/conventions.html>
-
-        Args:
-            basis: the eigenstates in the basis to use
-            nqubits: how many qubits there are in the state
-            operations: which bitstrings make up the state with what weight
-            operators: additional symbols to be used in operations
-
-        Returns:
-            the operator in whatever format the backend provides.
-
-        Examples:
-            >>> basis = {"r", "g"} #rydberg basis
-            >>> nqubits = 3 #or whatever
-            >>> x = {"rg": 1.0, "gr": 1.0}
-            >>> z = {"gg": 1.0, "rr": -1.0}
-            >>> operators = {"X": x, "Z": z} #define X and Z as conveniences
-            >>>
-            >>> operations = [ # 4 X1X + 3 1Z1
-            >>>     (
-            >>>         1.0,
-            >>>         [
-            >>>             ({"X": 2.0}, [0, 2]),
-            >>>             ({"Z": 3.0}, [1]),
-            >>>         ],
-            >>>     )
-            >>> ]
-            >>> op = Operator.from_operator_string(basis, nqubits, operations, operators)
-        """
-        pass
-
-    @abstractmethod
-    def __rmul__(self, scalar: complex) -> Operator:
-        """
-        Scale the operator by a scale factor.
-
-        Args:
-            scalar: the scale factor
-
-        Returns:
-            the scaled operator
-        """
-        pass
-
-    @abstractmethod
-    def __matmul__(self, other: Operator) -> Operator:
-        """
-        Compose two operators. The ordering is that
-        self is applied after other.
-
-        Args:
-            other: the operator to compose with self
-
-        Returns:
-            the composed operator
-        """
-        pass
emu_base/base_classes/results.py
DELETED
@@ -1,183 +0,0 @@
-from dataclasses import dataclass, field
-from typing import Any, Callable, Optional
-from pathlib import Path
-import json
-import logging
-import torch
-
-from emu_base.base_classes.callback import Callback, AggregationType
-from emu_base.base_classes.aggregators import aggregation_types_definitions
-
-
-class ResultsEncoder(json.JSONEncoder):
-    def default(self, obj: Any) -> Any:
-        if isinstance(obj, torch.Tensor):
-            return obj.tolist()
-        return super().default(obj)
-
-
-@dataclass
-class Results:
-    """
-    This class contains emulation results. Since the results written by
-    an emulator are defined through callbacks, the contents of this class
-    are not known a-priori.
-    """
-
-    statistics: Any = None  # Backend-specific data
-
-    _results: dict[str, dict[int, Any]] = field(default_factory=dict)
-    _default_aggregation_types: dict[str, Optional[AggregationType]] = field(
-        default_factory=dict
-    )
-
-    @classmethod
-    def aggregate(
-        cls,
-        results_to_aggregate: list["Results"],
-        **aggregator_functions: Callable[[Any], Any],
-    ) -> "Results":
-        if len(results_to_aggregate) == 0:
-            raise ValueError("no results to aggregate")
-
-        if len(results_to_aggregate) == 1:
-            return results_to_aggregate[0]
-
-        stored_callbacks = set(results_to_aggregate[0].get_result_names())
-
-        if not all(
-            set(results.get_result_names()) == stored_callbacks
-            for results in results_to_aggregate
-        ):
-            raise ValueError(
-                "Monte-Carlo results seem to provide from incompatible simulations: "
-                "they do not all contain the same observables"
-            )
-
-        aggregated: Results = cls()
-
-        for stored_callback in stored_callbacks:
-            aggregation_type = aggregator_functions.get(
-                stored_callback,
-                results_to_aggregate[0].get_aggregation_type(stored_callback),
-            )
-
-            if aggregation_type is None:
-                logging.getLogger("global_logger").warning(
-                    f"Skipping aggregation of `{stored_callback}`"
-                )
-                continue
-
-            aggregation_function: Any = (
-                aggregation_type
-                if callable(aggregation_type)
-                else aggregation_types_definitions[aggregation_type]
-            )
-
-            evaluation_times = results_to_aggregate[0].get_result_times(stored_callback)
-            if not all(
-                results.get_result_times(stored_callback) == evaluation_times
-                for results in results_to_aggregate
-            ):
-                raise ValueError(
-                    "Monte-Carlo results seem to provide from incompatible simulations: "
-                    "the callbacks are not stored at the same times"
-                )
-
-            aggregated._results[stored_callback] = {
-                t: aggregation_function(
-                    [result[stored_callback, t] for result in results_to_aggregate]
-                )
-                for t in evaluation_times
-            }
-
-        return aggregated
-
-    def store(self, *, callback: Callback, time: Any, value: Any) -> None:
-        self._results.setdefault(callback.name, {})
-
-        if time in self._results[callback.name]:
-            raise ValueError(
-                f"A value is already stored for observable '{callback.name}' at time {time}"
-            )
-
-        self._results[callback.name][time] = value
-        self._default_aggregation_types[callback.name] = callback.default_aggregation_type
-
-    def __getitem__(self, key: Any) -> Any:
-        if isinstance(key, tuple):
-            # results["energy", t]
-            callback_name, time = key
-
-            if callback_name not in self._results:
-                raise ValueError(
-                    f"No value for observable '{callback_name}' has been stored"
-                )
-
-            if time not in self._results[callback_name]:
-                raise ValueError(
-                    f"No value stored at time {time} for observable '{callback_name}'"
-                )
-
-            return self._results[callback_name][time]
-
-        # results["energy"][t]
-        assert isinstance(key, str)
-        callback_name = key
-        if callback_name not in self._results:
-            raise ValueError(f"No value for observable '{callback_name}' has been stored")
-
-        return self._results[key]
-
-    def get_result_names(self) -> list[str]:
-        """
-        get a list of results present in this object
-
-        Args:
-
-        Returns:
-            list of results by name
-
-        """
-        return list(self._results.keys())
-
-    def get_result_times(self, name: str) -> list[int]:
-        """
-        get a list of times for which the given result has been stored
-
-        Args:
-            name: name of the result to get times of
-
-        Returns:
-            list of times in ns
-
-        """
-        return list(self._results[name].keys())
-
-    def get_result(self, name: str, time: int) -> Any:
-        """
-        get the given result at the given time
-
-        Args:
-            name: name of the result to get
-            time: time in ns at which to get the result
-
-        Returns:
-            the result
-
-        """
-        return self._results[name][time]
-
-    def get_aggregation_type(self, name: str) -> Optional[AggregationType]:
-        return self._default_aggregation_types[name]
-
-    def dump(self, file_path: Path) -> None:
-        with file_path.open("w") as file_handle:
-            json.dump(
-                {
-                    "observables": self._results,
-                    "statistics": self.statistics,
-                },
-                file_handle,
-                cls=ResultsEncoder,
-            )
emu_base/base_classes/state.py
DELETED
@@ -1,97 +0,0 @@
-from __future__ import annotations
-from typing import Any, Iterable
-from abc import ABC, abstractmethod
-from collections import Counter
-
-
-class State(ABC):
-    """
-    Base class enforcing an API for quantum states.
-    Each backend will implement its own type of state, and the
-    below methods.
-    """
-
-    @abstractmethod
-    def inner(self, other: State) -> float | complex:
-        """
-        Compute the inner product between this state and other.
-        Note that self is the left state in the inner product,
-        so this function is linear in other, and anti-linear in self
-
-        Args:
-            other: the other state
-
-        Returns:
-            inner product
-        """
-        pass
-
-    @abstractmethod
-    def sample(
-        self, num_shots: int, p_false_pos: float = 0.0, p_false_neg: float = 0.0
-    ) -> Counter[str]:
-        """
-        Sample bitstrings from the state, taking into account error rates.
-
-        Args:
-            num_shots: how many bitstrings to sample
-            p_false_pos: the rate at which a 0 is read as a 1
-            p_false_neg: the rate at which a 1 is read as a 0
-
-        Returns:
-            the measured bitstrings, by count
-        """
-        pass
-
-    @abstractmethod
-    def __add__(self, other: State) -> State:
-        """
-        Computes the sum of two states.
-
-        Args:
-            other: the other state
-
-        Returns:
-            the summed state
-        """
-        pass
-
-    @abstractmethod
-    def __rmul__(self, scalar: complex) -> State:
-        """
-        Scale the state by a scale factor.
-
-        Args:
-            scalar: the scale factor
-
-        Returns:
-            the scaled state
-        """
-        pass
-
-    @staticmethod
-    @abstractmethod
-    def from_state_string(
-        *, basis: Iterable[str], nqubits: int, strings: dict[str, complex], **kwargs: Any
-    ) -> State:
-        """
-        Construct a state from the pulser abstract representation
-        <https://www.notion.so/pasqal/Abstract-State-and-Operator-Definition>
-        For a list of existing bases, see
-        <https://pulser.readthedocs.io/en/stable/conventions.html>
-
-        Args:
-            basis: A tuple containing the basis states.
-            nqubits: the number of qubits.
-            strings: A dictionary mapping state strings to complex or floats amplitudes
-
-        Returns:
-            the state in whatever format the backend provides.
-
-        Examples:
-            >>> afm_string_state = {"rrr": 1.0 / math.sqrt(2), "ggg": 1.0 / math.sqrt(2)}
-            >>> afm_state = State.from_state_string(
-            >>>     basis=("r", "g"), nqubits=3, strings=afm_string_state
-            >>> )
-        """
-        pass
emu_base-1.2.7.dist-info/RECORD
DELETED
@@ -1,20 +0,0 @@
-emu_base/__init__.py,sha256=i2Gv8rIusAD2hXepDFdFTfC5FQOOpNXtB0F1xoSd6QQ,1186
-emu_base/constants.py,sha256=41LYkKLUCz-oxPbd-j7nUDZuhIbUrnez6prT0uR0jcE,56
-emu_base/lindblad_operators.py,sha256=Nsl1YrWb8IDM9Z50ucy2Ed44p_IRETnlbr6qaqAgV50,1629
-emu_base/pulser_adapter.py,sha256=SIkw3Mrob4RPTt4QNYzC8CUJ0tj-sTj-t-fEKEyRTtU,10858
-emu_base/utils.py,sha256=RM8O0qfPAJfcdqqAojwEEKV7I3ZfVDklnTisTGhUg5k,233
-emu_base/base_classes/__init__.py,sha256=Su6fHtjCyg0fw-7y7e7nbMfDASppNRQs8iGaAOkO3c4,570
-emu_base/base_classes/aggregators.py,sha256=BDzFq12q36p12TXp2qv3g7a9cHnXbT1hhMZJLVojkDU,1862
-emu_base/base_classes/backend.py,sha256=7tnwb9MnRbwRN1_JTqliYftjqExuOE-Rrwz9AU2Pc4c,1645
-emu_base/base_classes/callback.py,sha256=JXah_ZDNM8iyPWy7IOwW481qRFyqVvlSM-0OkjBzV0A,3055
-emu_base/base_classes/config.py,sha256=oLS2jwmxqwMbLKIPdqohK-KPIcXdtpG3sRr_Y12poNQ,4105
-emu_base/base_classes/default_callbacks.py,sha256=F44kkuwWdVcvMGZ9vJ2q7ug-_P8IQyJv-SVxSVWHW_w,9940
-emu_base/base_classes/operator.py,sha256=MJjuDUTwJLbaSJzSNCKDWGvmGCGAEIEWISLoPSSzNsU,3501
-emu_base/base_classes/results.py,sha256=w4js7gThb49sNjEEDfEf0tYRwK004GrafWPeEOvyeCg,5810
-emu_base/base_classes/state.py,sha256=7iIyZmBqqJ6G4SyYZ3kyylWjAqiYIx0aW5B0T74EPZI,2707
-emu_base/math/__init__.py,sha256=6BbIytYV5uC-e5jLMtIErkcUl_PvfSNnhmVFY9Il8uQ,97
-emu_base/math/brents_root_finding.py,sha256=AVx6L1Il6rpPJWrLJ7cn6oNmJyZOPRgEaaZaubC9lsU,3711
-emu_base/math/krylov_exp.py,sha256=UCFNeq-j2ukgBsOPC9_Jiv1aqpy88SrslDLiCxIGBwk,3840
-emu_base-1.2.7.dist-info/METADATA,sha256=JpfSGDOQtIu2hwwoHXgBNgf8ZmIkUh0MUDiwJYPZL_Q,3522
-emu_base-1.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-emu_base-1.2.7.dist-info/RECORD,,
{emu_base-1.2.7.dist-info → emu_base-2.0.0.dist-info}/WHEEL
File without changes