emu-base 1.2.6__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_base/__init__.py +3 -33
- emu_base/aggregators.py +158 -0
- emu_base/lindblad_operators.py +4 -1
- emu_base/pulser_adapter.py +36 -23
- {emu_base-1.2.6.dist-info → emu_base-2.0.0.dist-info}/METADATA +2 -2
- emu_base-2.0.0.dist-info/RECORD +12 -0
- emu_base/base_classes/__init__.py +0 -31
- emu_base/base_classes/aggregators.py +0 -59
- emu_base/base_classes/backend.py +0 -48
- emu_base/base_classes/callback.py +0 -90
- emu_base/base_classes/config.py +0 -106
- emu_base/base_classes/default_callbacks.py +0 -300
- emu_base/base_classes/operator.py +0 -126
- emu_base/base_classes/results.py +0 -174
- emu_base/base_classes/state.py +0 -97
- emu_base-1.2.6.dist-info/RECORD +0 -20
- {emu_base-1.2.6.dist-info → emu_base-2.0.0.dist-info}/WHEEL +0 -0
emu_base/__init__.py
CHANGED
@@ -1,43 +1,13 @@
-from .base_classes.results import Results
-from .base_classes.callback import Callback, AggregationType
-from .base_classes.config import BackendConfig
-from .base_classes.operator import Operator
-from .base_classes.state import State
-from .base_classes.backend import Backend
-from .base_classes.default_callbacks import (
-    BitStrings,
-    CorrelationMatrix,
-    Energy,
-    EnergyVariance,
-    Expectation,
-    Fidelity,
-    QubitDensity,
-    StateResult,
-    SecondMomentOfEnergy,
-)
 from .constants import DEVICE_COUNT
 from .pulser_adapter import PulserData, HamiltonianType
 from .math.brents_root_finding import find_root_brents
 from .math.krylov_exp import krylov_exp, DEFAULT_MAX_KRYLOV_DIM
+from .aggregators import AggregationType, aggregate
 
 __all__ = [
     "__version__",
-    "Results",
-    "BackendConfig",
-    "Operator",
-    "State",
-    "Backend",
     "AggregationType",
-    "
-    "StateResult",
-    "BitStrings",
-    "QubitDensity",
-    "CorrelationMatrix",
-    "Expectation",
-    "Fidelity",
-    "Energy",
-    "EnergyVariance",
-    "SecondMomentOfEnergy",
+    "aggregate",
     "PulserData",
     "find_root_brents",
     "krylov_exp",
@@ -46,4 +16,4 @@ __all__ = [
     "DEVICE_COUNT",
 ]
 
-__version__ = "1.2.6"
+__version__ = "2.0.0"
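A minimal sketch of the trimmed public surface, assuming emu-base 2.0.0 and its dependencies are installed; only names still listed in the new `__all__` are imported, and the old base classes are no longer re-exported from `emu_base`:

```python
# Hedged usage sketch: imports restricted to names exported by emu_base 2.0.0.
from emu_base import AggregationType, aggregate, PulserData, krylov_exp

# The enum now lives in emu_base.aggregators instead of emu_base.base_classes.callback.
print(list(AggregationType))  # [<AggregationType.MEAN: 1>, <AggregationType.BAG_UNION: 2>]
```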
emu_base/aggregators.py
ADDED
@@ -0,0 +1,158 @@
+import statistics
+import torch
+from typing import Any, Callable
+import collections
+from enum import Enum, auto
+from pulser.backend import (
+    Results,
+)
+import logging
+
+
+_NUMERIC_TYPES = {int, float, complex}
+
+
+class AggregationType(Enum):
+    """
+    Defines how to combine multiple values from different simulation results.
+    """
+
+    MEAN = auto()  # statistics.fmean or list/matrix-wise equivalent
+    BAG_UNION = auto()  # Counter.__add__
+
+
+def mean_aggregator(
+    values: list[Any],
+) -> (
+    complex
+    | float
+    | list[complex]
+    | list[float]
+    | list[list[complex]]
+    | list[list[float]]
+    | torch.Tensor
+):  # FIXME: support tuples?
+    if values == []:
+        raise ValueError("Cannot average 0 samples")
+
+    element_type = type(values[0])
+
+    if element_type in _NUMERIC_TYPES:
+        return statistics.fmean(values)
+
+    if element_type == torch.Tensor:
+        acc = torch.zeros_like(values[0])
+        for ten in values:
+            acc += ten
+        return acc / len(values)
+
+    if element_type != list:
+        raise NotImplementedError("Cannot average this type of data")
+
+    if values[0] == []:
+        raise ValueError("Cannot average list of empty lists")
+
+    sub_element_type = type(values[0][0])
+
+    if sub_element_type in _NUMERIC_TYPES:
+        dim = len(values[0])
+        return [statistics.fmean(value[i] for value in values) for i in range(dim)]
+
+    if sub_element_type != list:  # FIXME: ABC.Iterable? Collection? subclass?
+        raise ValueError(f"Cannot average list of lists of {sub_element_type}")
+
+    if values[0][0] == []:
+        raise ValueError("Cannot average list of matrices with no columns")
+
+    if (sub_sub_element_type := type(values[0][0][0])) not in _NUMERIC_TYPES:
+        raise ValueError(f"Cannot average list of matrices of {sub_sub_element_type}")
+
+    dim1 = len(values[0])
+    dim2 = len(values[0][0])
+    return [
+        [statistics.fmean(value[i][j] for value in values) for j in range(dim2)]
+        for i in range(dim1)
+    ]
+
+
+def bag_union_aggregator(values: list[collections.Counter]) -> collections.Counter:
+    return sum(values, start=collections.Counter())
+
+
+aggregation_types_definitions: dict[AggregationType, Callable] = {
+    AggregationType.MEAN: mean_aggregator,
+    AggregationType.BAG_UNION: bag_union_aggregator,
+}
+
+
+def _get_aggregation_type(tag: str) -> AggregationType | None:
+    if tag.startswith("bitstrings"):
+        return AggregationType.BAG_UNION
+    if tag.startswith("expectation"):
+        return AggregationType.MEAN
+    if tag.startswith("fidelity"):
+        return AggregationType.MEAN
+    if tag.startswith("correlation_matrix"):
+        return AggregationType.MEAN
+    if tag.startswith("occupation"):
+        return AggregationType.MEAN
+    if tag.startswith("energy"):
+        return AggregationType.MEAN
+    if tag.startswith("energy_second_moment"):
+        return AggregationType.MEAN
+    else:
+        return None
+
+
+def aggregate(
+    results_to_aggregate: list[Results],
+    **aggregator_functions: Callable[[Any], Any],
+) -> Results:
+    if len(results_to_aggregate) == 0:
+        raise ValueError("no results to aggregate")
+    if len(results_to_aggregate) == 1:
+        return results_to_aggregate[0]
+    stored_callbacks = set(results_to_aggregate[0].get_result_tags())
+    if not all(
+        set(results.get_result_tags()) == stored_callbacks
+        for results in results_to_aggregate
+    ):
+        raise ValueError(
+            "Monte-Carlo results seem to provide from incompatible simulations: "
+            "they do not all contain the same observables"
+        )
+    aggregated = Results(
+        atom_order=results_to_aggregate[0].atom_order,
+        total_duration=results_to_aggregate[0].total_duration,
+    )
+    for tag in stored_callbacks:
+        aggregation_type = aggregator_functions.get(
+            tag,
+            _get_aggregation_type(tag),
+        )
+        if aggregation_type is None:
+            logging.getLogger("global_logger").warning(f"Skipping aggregation of `{tag}`")
+            continue
+        aggregation_function: Any = (
+            aggregation_type
+            if callable(aggregation_type)
+            else aggregation_types_definitions[aggregation_type]
+        )
+        evaluation_times = results_to_aggregate[0].get_result_times(tag)
+        if not all(
+            results.get_result_times(tag) == evaluation_times
+            for results in results_to_aggregate
+        ):
+            raise ValueError(
+                "Monte-Carlo results seem to provide from incompatible simulations: "
+                "the callbacks are not stored at the same times"
+            )
+
+        uuid = results_to_aggregate[0]._find_uuid(tag)
+        for t in results_to_aggregate[0].get_result_times(tag):
+            v = aggregation_function(
+                [result.get_result(tag, t) for result in results_to_aggregate]
+            )
+            aggregated._store_raw(uuid=uuid, tag=tag, time=t, value=v)
+
+    return aggregated
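The new module dispatches on the type of the stored values. A minimal sketch, assuming emu-base 2.0.0 and torch are importable, of how the two aggregators behave:

```python
import collections

import torch

from emu_base.aggregators import bag_union_aggregator, mean_aggregator

# Scalars are averaged with statistics.fmean.
print(mean_aggregator([1.0, 2.0, 3.0]))                      # 2.0

# Tensors are accumulated elementwise and divided by the sample count.
print(mean_aggregator([torch.ones(2), torch.zeros(2)]))      # tensor([0.5000, 0.5000])

# Lists of lists (e.g. correlation matrices) are averaged entry by entry.
print(mean_aggregator([[[1.0, 0.0], [0.0, 1.0]],
                       [[0.0, 1.0], [1.0, 0.0]]]))           # [[0.5, 0.5], [0.5, 0.5]]

# Bitstring counters from independent runs are merged as a multiset union.
print(bag_union_aggregator([collections.Counter({"01": 3}),
                            collections.Counter({"01": 1, "10": 2})]))
```

For tags without a built-in default (anything `_get_aggregation_type` does not recognise), `aggregate(results, some_tag=my_function)` accepts a per-tag override; otherwise the tag is skipped with a warning on the `global_logger` logger.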
emu_base/lindblad_operators.py
CHANGED
@@ -32,7 +32,10 @@ def get_lindblad_operators(
     ]
 
     if noise_type == "eff_noise":
-        if not all(
+        if not all(
+            isinstance(op, torch.Tensor) and op.shape == (2, 2)
+            for op in noise_model.eff_noise_opers
+        ):
             raise ValueError("Only 2 * 2 effective noise operator matrices are supported")
 
     return [
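The stricter check accepts only lists of 2×2 `torch.Tensor` effective noise operators. A small sketch of the predicate it applies, using hypothetical operator lists rather than a real `NoiseModel`:

```python
import torch

# Hypothetical operator lists; only the first one passes the new validation.
accepted = [torch.eye(2, dtype=torch.complex128)]   # 2x2 tensor
rejected = [torch.eye(3, dtype=torch.complex128)]   # wrong shape

for opers in (accepted, rejected):
    ok = all(isinstance(op, torch.Tensor) and op.shape == (2, 2) for op in opers)
    print(ok)  # True, then False
```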
emu_base/pulser_adapter.py
CHANGED
@@ -1,11 +1,13 @@
 import pulser
-from typing import Tuple
+from typing import Tuple, Sequence
 import torch
 import math
 from pulser.noise_model import NoiseModel
+from pulser.register.base_register import BaseRegister
 from enum import Enum
 
-from
+from pulser.backend.config import EmulationConfig
+
 from emu_base.lindblad_operators import get_lindblad_operators
 from emu_base.utils import dist2, dist3
 
@@ -16,7 +18,7 @@ class HamiltonianType(Enum):
 
 
 def _get_qubit_positions(
-    register:
+    register: BaseRegister,
 ) -> list[torch.Tensor]:
     """Conversion from pulser Register to emu-mps register (torch type).
     Each element will be given as [Rx,Ry,Rz]"""
@@ -31,7 +33,7 @@ def _get_qubit_positions(
 def _rydberg_interaction(sequence: pulser.Sequence) -> torch.Tensor:
     """
     Computes the Ising interaction matrix from the qubit positions.
-    Hᵢⱼ=C₆/R
+    Hᵢⱼ=C₆/R⁶ᵢⱼ (nᵢ⊗ nⱼ)
     """
 
     num_qubits = len(sequence.register.qubit_ids)
@@ -88,7 +90,7 @@ def _xy_interaction(sequence: pulser.Sequence) -> torch.Tensor:
 def _extract_omega_delta_phi(
     *,
     sequence: pulser.Sequence,
-
+    target_times: list[int],
     with_modulation: bool,
     laser_waist: float | None,
 ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
@@ -125,7 +127,7 @@ def _extract_omega_delta_phi(
 
     max_duration = sequence.get_duration(include_fall_time=with_modulation)
 
-    nsamples =
+    nsamples = len(target_times) - 1
     omega = torch.zeros(
         nsamples,
         len(sequence.register.qubit_ids),
@@ -157,12 +159,11 @@
         for slot in ch_samples.slots:
             global_times |= set(i for i in range(slot.ti, slot.tf))
 
-    step = 0
-    t = (step + 1 / 2) * dt
     omega_1 = torch.zeros_like(omega[0])
     omega_2 = torch.zeros_like(omega[0])
 
-
+    for i in range(nsamples):
+        t = (target_times[i] + target_times[i + 1]) / 2
         # The sampled values correspond to the start of each interval
         # To maximize the order of the solver, we need the values in the middle
         if math.ceil(t) < max_duration:
@@ -173,10 +174,10 @@
                 t2 = math.ceil(t)
                 omega_1[q_pos] = locals_a_d_p[q_id]["amp"][t1]
                 omega_2[q_pos] = locals_a_d_p[q_id]["amp"][t2]
-                delta[
+                delta[i, q_pos] = (
                     locals_a_d_p[q_id]["det"][t1] + locals_a_d_p[q_id]["det"][t2]
                 ) / 2.0
-                phi[
+                phi[i, q_pos] = (
                     locals_a_d_p[q_id]["phase"][t1] + locals_a_d_p[q_id]["phase"][t2]
                 ) / 2.0
             # omegas at different times need to have the laser waist applied independently
@@ -184,21 +185,19 @@
                 omega_1 *= waist_factors
             if t2 in global_times:
                 omega_2 *= waist_factors
-            omega[
+            omega[i] = 0.5 * (omega_1 + omega_2)
         else:
             # We're in the final step and dt=1, approximate this using linear extrapolation
             # we can reuse omega_1 and omega_2 from before
             for q_pos, q_id in enumerate(sequence.register.qubit_ids):
-                delta[
+                delta[i, q_pos] = (
                     3.0 * locals_a_d_p[q_id]["det"][t2] - locals_a_d_p[q_id]["det"][t1]
                 ) / 2.0
-                phi[
+                phi[i, q_pos] = (
                     3.0 * locals_a_d_p[q_id]["phase"][t2]
                     - locals_a_d_p[q_id]["phase"][t1]
                 ) / 2.0
-            omega[
-        step += 1
-        t = (step + 1 / 2) * dt
+            omega[i] = torch.clamp(0.5 * (3 * omega_2 - omega_1).real, min=0.0)
 
     return omega, delta, phi
 
@@ -221,7 +220,7 @@ def _get_all_lindblad_noise_operators(
 
 
 class PulserData:
-    slm_end_time:
+    slm_end_time: float
     full_interaction_matrix: torch.Tensor
     masked_interaction_matrix: torch.Tensor
     omega: torch.Tensor
@@ -230,15 +229,31 @@ class PulserData:
     hamiltonian_type: HamiltonianType
    lindblad_ops: list[torch.Tensor]
 
-    def __init__(self, *, sequence: pulser.Sequence, config:
+    def __init__(self, *, sequence: pulser.Sequence, config: EmulationConfig, dt: int):
         self.qubit_count = len(sequence.register.qubit_ids)
+        sequence_duration = sequence.get_duration()
+        # the end value is exclusive, so add +1
+        observable_times = set(torch.arange(0, sequence.get_duration() + 1, dt).tolist())
+        observable_times.add(sequence.get_duration())
+        for obs in config.observables:
+            times: Sequence[float]
+            if obs.evaluation_times is not None:
+                times = obs.evaluation_times
+            elif config.default_evaluation_times != "Full":
+                times = (
+                    config.default_evaluation_times.tolist()  # type: ignore[union-attr,assignment]
+                )
+            observable_times |= set([round(time * sequence_duration) for time in times])
+
+        self.target_times: list[int] = list(observable_times)
+        self.target_times.sort()
 
         laser_waist = (
             config.noise_model.laser_waist if config.noise_model is not None else None
         )
         self.omega, self.delta, self.phi = _extract_omega_delta_phi(
             sequence=sequence,
-
+            target_times=self.target_times,
             with_modulation=config.with_modulation,
             laser_waist=laser_waist,
         )
@@ -259,9 +274,7 @@ class PulserData:
                 "the interaction matrix"
             )
 
-            self.full_interaction_matrix =
-                config.interaction_matrix, dtype=torch.float64
-            )
+            self.full_interaction_matrix = config.interaction_matrix.as_tensor()
         elif self.hamiltonian_type == HamiltonianType.Rydberg:
             self.full_interaction_matrix = _rydberg_interaction(sequence)
         elif self.hamiltonian_type == HamiltonianType.XY:
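The constructor now derives `target_times` from a `dt` grid over the sequence duration, the final time, and the observables' evaluation times, which are given as fractions of the total duration. A toy sketch with made-up numbers (not the package API) of that set construction:

```python
import torch

# Hypothetical inputs: sequence duration in ns, solver step, and one observable's
# evaluation_times expressed as fractions of the total duration.
sequence_duration = 1000
dt = 300
evaluation_fractions = [0.25, 0.5, 1.0]

# dt-grid (arange's end is exclusive, hence the +1), plus the final time,
# plus the requested evaluation times converted to integer nanoseconds.
target_times = set(torch.arange(0, sequence_duration + 1, dt).tolist())
target_times.add(sequence_duration)
target_times |= {round(f * sequence_duration) for f in evaluation_fractions}

print(sorted(target_times))  # [0, 250, 300, 500, 600, 900, 1000]
```

`_extract_omega_delta_phi` then samples amplitude, detuning, and phase at the midpoint of each consecutive pair of target times, which is why it allocates `len(target_times) - 1` rows.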
{emu_base-1.2.6.dist-info → emu_base-2.0.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-base
-Version: 1.2.6
+Version: 2.0.0
 Summary: Pasqal base classes for emulators
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: pulser-core==1.
+Requires-Dist: pulser-core==1.4.*
 Requires-Dist: torch==2.5.0
 Description-Content-Type: text/markdown
 
emu_base-2.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
+emu_base/__init__.py,sha256=b60wKpJR1-oUIEv68t0-WNza2IXSL7joPQVt5Hw-rj8,493
+emu_base/aggregators.py,sha256=bB-rldoDAErxQMpL715K5lpiabGOpkCY0GyxW7mfHuc,5000
+emu_base/constants.py,sha256=41LYkKLUCz-oxPbd-j7nUDZuhIbUrnez6prT0uR0jcE,56
+emu_base/lindblad_operators.py,sha256=Nsl1YrWb8IDM9Z50ucy2Ed44p_IRETnlbr6qaqAgV50,1629
+emu_base/pulser_adapter.py,sha256=dRD80z_dVXkCjDBLRIkmqNGg5M78VEKkQuk3H5JdZSM,11241
+emu_base/utils.py,sha256=RM8O0qfPAJfcdqqAojwEEKV7I3ZfVDklnTisTGhUg5k,233
+emu_base/math/__init__.py,sha256=6BbIytYV5uC-e5jLMtIErkcUl_PvfSNnhmVFY9Il8uQ,97
+emu_base/math/brents_root_finding.py,sha256=AVx6L1Il6rpPJWrLJ7cn6oNmJyZOPRgEaaZaubC9lsU,3711
+emu_base/math/krylov_exp.py,sha256=UCFNeq-j2ukgBsOPC9_Jiv1aqpy88SrslDLiCxIGBwk,3840
+emu_base-2.0.0.dist-info/METADATA,sha256=uoylMuopYijyAJ9G8iY_cxXanQlJGu1ibvkd17Soi2g,3522
+emu_base-2.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_base-2.0.0.dist-info/RECORD,,
emu_base/base_classes/__init__.py
DELETED
@@ -1,31 +0,0 @@
-from .operator import Operator
-from .state import State
-from .results import Results
-from .callback import Callback
-from .default_callbacks import (
-    StateResult,
-    BitStrings,
-    QubitDensity,
-    CorrelationMatrix,
-    Expectation,
-    Fidelity,
-    Energy,
-    EnergyVariance,
-    SecondMomentOfEnergy,
-)
-
-__all__ = [
-    "Operator",
-    "State",
-    "Results",
-    "Callback",
-    "StateResult",
-    "BitStrings",
-    "QubitDensity",
-    "CorrelationMatrix",
-    "Expectation",
-    "Fidelity",
-    "Energy",
-    "EnergyVariance",
-    "SecondMomentOfEnergy",
-]
emu_base/base_classes/aggregators.py
DELETED
@@ -1,59 +0,0 @@
-import statistics
-from typing import Any, Callable
-import collections
-from emu_base.base_classes.callback import AggregationType
-
-
-_NUMERIC_TYPES = {int, float, complex}
-
-
-def mean_aggregator(
-    values: list[Any],
-) -> complex | float | list[complex] | list[float] | list[list[complex]] | list[
-    list[float]
-]:  # FIXME: support tuples?
-    if values == []:
-        raise ValueError("Cannot average 0 samples")
-
-    element_type = type(values[0])
-
-    if element_type in _NUMERIC_TYPES:
-        return statistics.fmean(values)
-
-    if element_type != list:
-        raise NotImplementedError("Cannot average this type of data")
-
-    if values[0] == []:
-        raise ValueError("Cannot average list of empty lists")
-
-    sub_element_type = type(values[0][0])
-
-    if sub_element_type in _NUMERIC_TYPES:
-        dim = len(values[0])
-        return [statistics.fmean(value[i] for value in values) for i in range(dim)]
-
-    if sub_element_type != list:  # FIXME: ABC.Iterable? Collection? subclass?
-        raise ValueError(f"Cannot average list of lists of {sub_element_type}")
-
-    if values[0][0] == []:
-        raise ValueError("Cannot average list of matrices with no columns")
-
-    if (sub_sub_element_type := type(values[0][0][0])) not in _NUMERIC_TYPES:
-        raise ValueError(f"Cannot average list of matrices of {sub_sub_element_type}")
-
-    dim1 = len(values[0])
-    dim2 = len(values[0][0])
-    return [
-        [statistics.fmean(value[i][j] for value in values) for j in range(dim2)]
-        for i in range(dim1)
-    ]
-
-
-def bag_union_aggregator(values: list[collections.Counter]) -> collections.Counter:
-    return sum(values, start=collections.Counter())
-
-
-aggregation_types_definitions: dict[AggregationType, Callable] = {
-    AggregationType.MEAN: mean_aggregator,
-    AggregationType.BAG_UNION: bag_union_aggregator,
-}
emu_base/base_classes/backend.py
DELETED
@@ -1,48 +0,0 @@
-import warnings
-from abc import ABC, abstractmethod
-
-from pulser import Sequence
-
-from emu_base.base_classes.config import BackendConfig
-from emu_base.base_classes.results import Results
-
-
-class Backend(ABC):
-    """
-    Base class for different emulation backends.
-    Forces backends to implement a run method.
-    """
-
-    @staticmethod
-    def validate_sequence(sequence: Sequence) -> None:
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore", category=DeprecationWarning)
-
-            if not isinstance(sequence, Sequence):
-                raise TypeError(
-                    "The provided sequence has to be a valid " "pulser.Sequence instance."
-                )
-            if sequence.is_parametrized() or sequence.is_register_mappable():
-                raise ValueError(
-                    "Not supported"
-                    "The provided sequence needs to be built to be simulated. Call"
-                    " `Sequence.build()` with the necessary parameters."
-                )
-            if not sequence._schedule:
-                raise ValueError("The provided sequence has no declared channels.")
-            if all(sequence._schedule[x][-1].tf == 0 for x in sequence.declared_channels):
-                raise ValueError("No instructions given for the channels in the sequence.")
-
-    @abstractmethod
-    def run(self, sequence: Sequence, config: BackendConfig) -> Results:
-        """
-        Emulates the given sequence.
-
-        Args:
-            sequence: a Pulser sequence to simulate
-            config: the config. Should be of the appropriate type for the backend
-
-        Returns:
-            the simulation results
-        """
-        pass
emu_base/base_classes/callback.py
DELETED
@@ -1,90 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any, Optional, TYPE_CHECKING
-from enum import Enum, auto
-
-from emu_base.base_classes.config import BackendConfig
-from emu_base.base_classes.operator import Operator
-from emu_base.base_classes.state import State
-
-if TYPE_CHECKING:
-    from emu_base.base_classes.results import Results
-
-
-class AggregationType(Enum):
-    """
-    Defines how to combine multiple values from different simulation results.
-    """
-
-    MEAN = auto()  # statistics.fmean or list/matrix-wise equivalent
-    BAG_UNION = auto()  # Counter.__add__
-
-
-class Callback(ABC):
-    def __init__(self, evaluation_times: set[int]):
-        """
-        The callback base class that can be subclassed to add new kinds of results
-        to the Results object returned by the Backend
-
-        Args:
-            evaluation_times: the times at which to add a result to Results
-        """
-        self.evaluation_times = evaluation_times
-
-    def __call__(
-        self, config: BackendConfig, t: int, state: State, H: Operator, result: "Results"
-    ) -> None:
-        """
-        This function is called after each time step performed by the emulator.
-        By default it calls apply to compute a result and put it in `result`
-        if `t` in `self.evaluation_times`.
-        It can be overloaded to define any custom behaviour for a `Callback`.
-
-        Args:
-            config: the config object passed to the run method
-            t: the current time in ns
-            state: the current state
-            H: the Hamiltonian at this time
-            result: the results object
-        """
-        if t in self.evaluation_times:
-            value_to_store = self.apply(config, t, state, H)
-            result.store(callback=self, time=t, value=value_to_store)
-
-    @property
-    @abstractmethod
-    def name(self) -> str:
-        """
-        The name of the observable, can be used to index into the Results object.
-        Some Callbacks might have multiple instances, such as a callback to compute
-        a fidelity on some given state. In that case, this method could make sure
-        each instance has a unique name.
-
-        Returns:
-            the name of the callback
-        """
-        pass
-
-    @abstractmethod
-    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
-        """
-        This method must be implemented by subclasses. The result of this method
-        gets put in the Results object.
-
-        Args:
-            config: the config object passed to the run method
-            t: the current time in ns
-            state: the current state
-            H: the Hamiltonian at this time
-
-        Returns:
-            the result to put in Results
-        """
-        pass
-
-    @property
-    def default_aggregation_type(self) -> Optional[AggregationType]:
-        """
-        Defines how to combine by default multiple values from different simulation results.
-        None means no default, therefore aggregator function is always user-provided.
-        """
-        return None