emu-base 1.2.6__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,106 +0,0 @@
1
- from __future__ import annotations
2
- from pulser.noise_model import NoiseModel
3
- import logging
4
- import sys
5
- import pathlib
6
- from typing import TYPE_CHECKING
7
- import torch
8
-
9
- if TYPE_CHECKING:
10
- from emu_base.base_classes.callback import Callback
11
-
12
-
13
- class BackendConfig:
14
- """The base backend configuration.
15
-
16
- Args:
17
- observables: a list of callbacks to compute observables
18
- with_modulation: if True, run the sequence with hardware modulation
19
- noise_model: The pulser.NoiseModel to use in the simulation.
20
- interaction_matrix: When specified, override the interaction terms in the Hamiltonian.
21
- This corresponds to the $U_{ij}$ terms in the documentation. Must be symmetric.
22
- interaction_cutoff: set interaction coefficients smaller than this to 0.
23
- This can improve the memory profile of the application for some backends.
24
- log_level: The output verbosity. Should be one of the constants from logging.
25
- log_file: a path to a file where to store the log, instead of printing to stdout
26
-
27
- Examples:
28
- >>> observables = [BitStrings(400, 100)] #compute 100 bitstrings at 400ns
29
- >>> noise_model = pulser.noise_model.NoiseModel()
30
- >>> interaction_matrix = [[1 for _ in range(nqubits)] for _ in range(nqubits)]
31
- >>> interaction_cutoff = 2.0 #this will turn off all the above interactions again
32
- >>> log_level = logging.WARNING
33
- """
34
-
35
- def __init__(
36
- self,
37
- *,
38
- observables: list[Callback] | None = None,
39
- with_modulation: bool = False,
40
- noise_model: NoiseModel = None,
41
- interaction_matrix: list[list[float]] | None = None,
42
- interaction_cutoff: float = 0.0,
43
- log_level: int = logging.INFO,
44
- log_file: pathlib.Path | None = None,
45
- ):
46
- if observables is None:
47
- observables = []
48
- self.callbacks = (
49
- observables # we can add other types of callbacks, and just stack them
50
- )
51
- self.with_modulation = with_modulation
52
- self.noise_model = noise_model
53
-
54
- if interaction_matrix is not None and not (
55
- isinstance(interaction_matrix, list)
56
- and isinstance(interaction_matrix[0], list)
57
- and isinstance(interaction_matrix[0][0], float)
58
- ):
59
- raise ValueError(
60
- "Interaction matrix must be provided as a Python list of lists of floats"
61
- )
62
-
63
- if interaction_matrix is not None:
64
- int_mat = torch.tensor(interaction_matrix)
65
- tol = 1e-10
66
- if not (
67
- int_mat.numel() != 0
68
- and torch.all(torch.isreal(int_mat))
69
- and int_mat.dim() == 2
70
- and int_mat.shape[0] == int_mat.shape[1]
71
- and torch.allclose(int_mat, int_mat.T, atol=tol)
72
- and torch.norm(torch.diag(int_mat)) < tol
73
- ):
74
- raise ValueError("Interaction matrix is not symmetric and zero diag")
75
-
76
- self.interaction_matrix = interaction_matrix
77
- self.interaction_cutoff = interaction_cutoff
78
- self.logger = logging.getLogger("global_logger")
79
- self.log_file = log_file
80
- self.log_level = log_level
81
-
82
- self.init_logging()
83
-
84
- if noise_model is not None and (
85
- noise_model.runs != 1
86
- or noise_model.samples_per_run != 1
87
- or noise_model.runs is not None
88
- or noise_model.samples_per_run is not None
89
- ):
90
- self.logger.warning(
91
- "Warning: The runs and samples_per_run values of the NoiseModel are ignored!"
92
- )
93
-
94
- def init_logging(self) -> None:
95
- if self.log_file is None:
96
- logging.basicConfig(
97
- level=self.log_level, format="%(message)s", stream=sys.stdout, force=True
98
- ) # default to stream = sys.stderr
99
- else:
100
- logging.basicConfig(
101
- level=self.log_level,
102
- format="%(message)s",
103
- filename=str(self.log_file),
104
- filemode="w",
105
- force=True,
106
- )
@@ -1,300 +0,0 @@
1
- from copy import deepcopy
2
- from typing import Any
3
-
4
- from emu_base.base_classes.callback import Callback, AggregationType
5
- from emu_base.base_classes.config import BackendConfig
6
- from emu_base.base_classes.operator import Operator
7
- from emu_base.base_classes.state import State
8
-
9
-
10
- class StateResult(Callback):
11
- """
12
- Store the quantum state in whatever format the backend provides
13
-
14
- Args:
15
- evaluation_times: the times at which to store the state
16
- """
17
-
18
- def __init__(self, evaluation_times: set[int]):
19
- super().__init__(evaluation_times)
20
-
21
- name = "state"
22
-
23
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
24
- return deepcopy(state)
25
-
26
-
27
- class BitStrings(Callback):
28
- """
29
- Store bitstrings sampled from the current state. Error rates are taken from the config
30
- passed to the run method of the backend. The bitstrings are stored as a Counter[str].
31
-
32
- Args:
33
- evaluation_times: the times at which to sample bitstrings
34
- num_shots: how many bitstrings to sample each time this observable is computed
35
- """
36
-
37
- def __init__(self, evaluation_times: set[int], num_shots: int = 1000):
38
- super().__init__(evaluation_times)
39
- self.num_shots = num_shots
40
-
41
- name = "bitstrings"
42
-
43
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
44
- p_false_pos = (
45
- 0.0 if config.noise_model is None else config.noise_model.p_false_pos
46
- )
47
- p_false_neg = (
48
- 0.0 if config.noise_model is None else config.noise_model.p_false_neg
49
- )
50
-
51
- return state.sample(self.num_shots, p_false_pos, p_false_neg)
52
-
53
- default_aggregation_type = AggregationType.BAG_UNION
54
-
55
-
56
- _fidelity_counter = -1
57
-
58
-
59
- class Fidelity(Callback):
60
- """
61
- Store $<ψ|φ(t)>$ for the given state $|ψ>$,
62
- and the state $|φ(t)>$ obtained by time evolution.
63
-
64
- Args:
65
- evaluation_times: the times at which to compute the fidelity
66
- state: the state |ψ>. Note that this must be of appropriate type for the backend
67
-
68
- Examples:
69
- >>> state = State.from_state_string(...) #see State API
70
- >>> fidelity = Fidelity([400], state) #measure fidelity on state at t=400ns
71
- """
72
-
73
- def __init__(self, evaluation_times: set[int], state: State):
74
- super().__init__(evaluation_times)
75
- global _fidelity_counter
76
- _fidelity_counter += 1
77
- self.index = _fidelity_counter
78
- self.state = state
79
-
80
- @property
81
- def name(self) -> str:
82
- return f"fidelity_{self.index}"
83
-
84
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
85
- return self.state.inner(state)
86
-
87
-
88
- _expectation_counter = -1
89
-
90
-
91
- class Expectation(Callback):
92
- """
93
- Store the expectation of the given operator on the current state
94
- (i.e. $\\langle φ(t)|\\mathrm{operator}|φ(t)\\rangle$).
95
-
96
- Args:
97
- evaluation_times: the times at which to compute the expectation
98
- operator: the operator to measure. Must be of appropriate type for the backend.
99
-
100
- Examples:
101
- >>> op = Operator.from_operator_string(...) #see Operator API
102
- >>> expectation = Expectation([400], op) #measure the expectation of op at t=400ns
103
- """
104
-
105
- def __init__(self, evaluation_times: set[int], operator: Operator):
106
- super().__init__(evaluation_times)
107
- global _expectation_counter
108
- _expectation_counter += 1
109
- self.index = _expectation_counter
110
- self.operator = operator
111
-
112
- @property
113
- def name(self) -> str:
114
- return f"expectation_{self.index}"
115
-
116
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
117
- return self.operator.expect(state)
118
-
119
- default_aggregation_type = AggregationType.MEAN
120
-
121
-
122
- class CorrelationMatrix(Callback):
123
- """
124
- Store the correlation matrix for the current state.
125
- Requires specification of the basis used in the emulation
126
- https://pulser.readthedocs.io/en/stable/conventions.html
127
- It currently supports
128
- - the rydberg basis ('r','g')
129
- - the xy basis ('0', '1')
130
- and returns
131
-
132
- `[[<φ(t)|n_i n_j|φ(t)> for j in qubits] for i in qubits]`
133
-
134
- n_i being the operator that projects qubit i onto the state that measures as 1.
135
- The diagonal of this matrix is the QubitDensity. The correlation matrix
136
- is stored as a list of lists.
137
-
138
- Args:
139
- evaluation_times: the times at which to compute the correlation matrix
140
- basis: the basis used by the sequence
141
- nqubits: the number of qubits in the Register
142
-
143
- Notes:
144
- See the API for `Operator.from_operator_string` for an example of what to do with
145
- basis and nqubits.
146
- """
147
-
148
- def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
149
- super().__init__(evaluation_times)
150
- self.operators: list[list[Operator]] | None = None
151
- self.basis = set(basis)
152
- if self.basis == {"r", "g"}:
153
- self.op_string = "rr"
154
- elif self.basis == {"0", "1"}:
155
- self.op_string = "11"
156
- else:
157
- raise ValueError("Unsupported basis provided")
158
- self.nqubits = nqubits
159
-
160
- name = "correlation_matrix"
161
-
162
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
163
- if hasattr(state, "get_correlation_matrix") and callable(
164
- state.get_correlation_matrix
165
- ):
166
- return state.get_correlation_matrix()
167
-
168
- if self.operators is None or not isinstance(self.operators[0], type(H)):
169
- self.operators = [
170
- [
171
- H.from_operator_string(
172
- self.basis,
173
- self.nqubits,
174
- [(1.0, [({self.op_string: 1.0}, list({i, j}))])],
175
- )
176
- for j in range(self.nqubits)
177
- ]
178
- for i in range(self.nqubits)
179
- ]
180
- return [[op.expect(state).real for op in ops] for ops in self.operators]
181
-
182
- default_aggregation_type = AggregationType.MEAN
183
-
184
-
185
- class QubitDensity(Callback):
186
- """
187
- Requires specification of the basis used in the emulation
188
- https://pulser.readthedocs.io/en/stable/conventions.html
189
- It currently supports
190
- - the rydberg basis ('r','g')
191
- - the xy basis ('0', '1')
192
- and returns
193
-
194
- `[<φ(t)|n_i|φ(t)> for i in qubits]`
195
-
196
- n_i being the operator that projects qubit i onto the state that measures as 1.
197
- The qubit density is stored as a list.
198
-
199
- Args:
200
- evaluation_times: the times at which to compute the density
201
- basis: the basis used by the sequence
202
- nqubits: the number of qubits in the Register
203
-
204
- Notes:
205
- See the API for `State.from_state_string` for an example of what to do with
206
- basis and nqubits.
207
- """
208
-
209
- def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
210
- super().__init__(evaluation_times)
211
- self.operators: list[Operator] | None = None
212
- self.basis = set(basis)
213
- if self.basis == {"r", "g"}:
214
- self.op_string = "rr"
215
- elif self.basis == {"0", "1"}:
216
- self.op_string = "11"
217
- else:
218
- raise ValueError("Unsupported basis provided")
219
- self.nqubits = nqubits
220
-
221
- name = "qubit_density"
222
-
223
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
224
- if self.operators is None or not isinstance(self.operators[0], type(H)):
225
- self.operators = [
226
- H.from_operator_string(
227
- self.basis, self.nqubits, [(1.0, [({self.op_string: 1.0}, [i])])]
228
- )
229
- for i in range(self.nqubits)
230
- ]
231
- return [op.expect(state).real for op in self.operators]
232
-
233
- default_aggregation_type = AggregationType.MEAN
234
-
235
-
236
- class Energy(Callback):
237
- """
238
- Store the expectation value of the current Hamiltonian
239
- (i.e. $\\langle φ(t)|H(t)|φ(t) \\rangle$)
240
-
241
- Args:
242
- evaluation_times: the times at which to compute the expectation
243
- """
244
-
245
- def __init__(self, evaluation_times: set[int]):
246
- super().__init__(evaluation_times)
247
-
248
- name = "energy"
249
-
250
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
251
- return H.expect(state).real
252
-
253
- default_aggregation_type = AggregationType.MEAN
254
-
255
-
256
- class EnergyVariance(Callback):
257
- """
258
- Store the variance of the current Hamiltonian
259
- (i.e. $\\langle φ(t)|H(t)^2|φ(t)\\rangle - \\langle φ(t)|H(t)|φ(t)\\rangle^2$)
260
-
261
- Args:
262
- evaluation_times: the times at which to compute the variance
263
- """
264
-
265
- def __init__(self, evaluation_times: set[int]):
266
- super().__init__(evaluation_times)
267
-
268
- name = "energy_variance"
269
-
270
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
271
- h_squared = H @ H
272
- return h_squared.expect(state).real - H.expect(state).real ** 2
273
-
274
- # Explicitly setting this to None out of safety: in the case of MonteCarlo,
275
- # the aggregated variance cannot be computed from this callback.
276
- # Instead, one first needs to average Energy and SecondMomentOfEnergy,
277
- # and then compute the variance with the formula:
278
- # AggregatedEnergyVariance = AveragedSecondMomentOfEnergy - AveragedEnergy**2
279
- default_aggregation_type = None
280
-
281
-
282
- class SecondMomentOfEnergy(Callback):
283
- """
284
- Store the expectation value $\\langle φ(t)|H(t)^2|φ(t)\\rangle$.
285
- Useful for computing the variance when averaging over many executions of the program.
286
-
287
- Args:
288
- evaluation_times: the times at which to compute the variance
289
- """
290
-
291
- def __init__(self, evaluation_times: set[int]):
292
- super().__init__(evaluation_times)
293
-
294
- name = "second_moment_of_energy"
295
-
296
- def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
297
- h_squared = H @ H
298
- return h_squared.expect(state).real
299
-
300
- default_aggregation_type = AggregationType.MEAN
@@ -1,126 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from abc import ABC, abstractmethod
4
- from typing import Any, Iterable
5
-
6
- from emu_base.base_classes.state import State
7
-
8
-
9
- QuditOp = dict[str, complex] # single qubit operator
10
- TensorOp = list[tuple[QuditOp, list[int]]] # QuditOp applied to list of qubits
11
- FullOp = list[tuple[complex, TensorOp]] # weighted sum of TensorOp
12
-
13
-
14
- class Operator(ABC):
15
- @abstractmethod
16
- def __mul__(self, other: State) -> State:
17
- """
18
- Apply the operator to a state
19
-
20
- Args:
21
- other: the state to apply this operator to
22
-
23
- Returns:
24
- the resulting state
25
- """
26
- pass
27
-
28
- @abstractmethod
29
- def __add__(self, other: Operator) -> Operator:
30
- """
31
- Computes the sum of two operators.
32
-
33
- Args:
34
- other: the other operator
35
-
36
- Returns:
37
- the summed operator
38
- """
39
- pass
40
-
41
- @abstractmethod
42
- def expect(self, state: State) -> float | complex:
43
- """
44
- Compute the expectation value of self on the given state.
45
-
46
- Args:
47
- state: the state with which to compute
48
-
49
- Returns:
50
- the expectation
51
- """
52
-
53
- @staticmethod
54
- @abstractmethod
55
- def from_operator_string(
56
- basis: Iterable[str],
57
- nqubits: int,
58
- operations: FullOp,
59
- operators: dict[str, QuditOp] = {},
60
- /,
61
- **kwargs: Any,
62
- ) -> Operator:
63
- """
64
- Create an operator in the backend-specific format from the
65
- pulser abstract representation
66
- <https://www.notion.so/pasqal/Abstract-State-and-Operator-Definition>
67
- By default it supports strings 'ij', where i and j in basis,
68
- to denote |i><j|, but additional symbols can be defined in operators
69
- For a list of existing bases, see
70
- <https://pulser.readthedocs.io/en/stable/conventions.html>
71
-
72
- Args:
73
- basis: the eigenstates in the basis to use
74
- nqubits: how many qubits there are in the state
75
- operations: which bitstrings make up the state with what weight
76
- operators: additional symbols to be used in operations
77
-
78
- Returns:
79
- the operator in whatever format the backend provides.
80
-
81
- Examples:
82
- >>> basis = {"r", "g"} #rydberg basis
83
- >>> nqubits = 3 #or whatever
84
- >>> x = {"rg": 1.0, "gr": 1.0}
85
- >>> z = {"gg": 1.0, "rr": -1.0}
86
- >>> operators = {"X": x, "Z": z} #define X and Z as conveniences
87
- >>>
88
- >>> operations = [ # 4 X1X + 3 1Z1
89
- >>> (
90
- >>> 1.0,
91
- >>> [
92
- >>> ({"X": 2.0}, [0, 2]),
93
- >>> ({"Z": 3.0}, [1]),
94
- >>> ],
95
- >>> )
96
- >>> ]
97
- >>> op = Operator.from_operator_string(basis, nqubits, operations, operators)
98
- """
99
- pass
100
-
101
- @abstractmethod
102
- def __rmul__(self, scalar: complex) -> Operator:
103
- """
104
- Scale the operator by a scale factor.
105
-
106
- Args:
107
- scalar: the scale factor
108
-
109
- Returns:
110
- the scaled operator
111
- """
112
- pass
113
-
114
- @abstractmethod
115
- def __matmul__(self, other: Operator) -> Operator:
116
- """
117
- Compose two operators. The ordering is that
118
- self is applied after other.
119
-
120
- Args:
121
- other: the operator to compose with self
122
-
123
- Returns:
124
- the composed operator
125
- """
126
- pass
@@ -1,174 +0,0 @@
1
- from dataclasses import dataclass, field
2
- from typing import Any, Callable, Optional
3
- from pathlib import Path
4
- import json
5
- import logging
6
-
7
- from emu_base.base_classes.callback import Callback, AggregationType
8
- from emu_base.base_classes.aggregators import aggregation_types_definitions
9
-
10
-
11
- @dataclass
12
- class Results:
13
- """
14
- This class contains emulation results. Since the results written by
15
- an emulator are defined through callbacks, the contents of this class
16
- are not known a-priori.
17
- """
18
-
19
- statistics: Any = None # Backend-specific data
20
-
21
- _results: dict[str, dict[int, Any]] = field(default_factory=dict)
22
- _default_aggregation_types: dict[str, Optional[AggregationType]] = field(
23
- default_factory=dict
24
- )
25
-
26
- @classmethod
27
- def aggregate(
28
- cls,
29
- results_to_aggregate: list["Results"],
30
- **aggregator_functions: Callable[[Any], Any],
31
- ) -> "Results":
32
- if len(results_to_aggregate) == 0:
33
- raise ValueError("no results to aggregate")
34
-
35
- if len(results_to_aggregate) == 1:
36
- return results_to_aggregate[0]
37
-
38
- stored_callbacks = set(results_to_aggregate[0].get_result_names())
39
-
40
- if not all(
41
- set(results.get_result_names()) == stored_callbacks
42
- for results in results_to_aggregate
43
- ):
44
- raise ValueError(
45
- "Monte-Carlo results seem to provide from incompatible simulations: "
46
- "they do not all contain the same observables"
47
- )
48
-
49
- aggregated: Results = cls()
50
-
51
- for stored_callback in stored_callbacks:
52
- aggregation_type = aggregator_functions.get(
53
- stored_callback,
54
- results_to_aggregate[0].get_aggregation_type(stored_callback),
55
- )
56
-
57
- if aggregation_type is None:
58
- logging.getLogger("global_logger").warning(
59
- f"Skipping aggregation of `{stored_callback}`"
60
- )
61
- continue
62
-
63
- aggregation_function: Any = (
64
- aggregation_type
65
- if callable(aggregation_type)
66
- else aggregation_types_definitions[aggregation_type]
67
- )
68
-
69
- evaluation_times = results_to_aggregate[0].get_result_times(stored_callback)
70
- if not all(
71
- results.get_result_times(stored_callback) == evaluation_times
72
- for results in results_to_aggregate
73
- ):
74
- raise ValueError(
75
- "Monte-Carlo results seem to provide from incompatible simulations: "
76
- "the callbacks are not stored at the same times"
77
- )
78
-
79
- aggregated._results[stored_callback] = {
80
- t: aggregation_function(
81
- [result[stored_callback, t] for result in results_to_aggregate]
82
- )
83
- for t in evaluation_times
84
- }
85
-
86
- return aggregated
87
-
88
- def store(self, *, callback: Callback, time: Any, value: Any) -> None:
89
- self._results.setdefault(callback.name, {})
90
-
91
- if time in self._results[callback.name]:
92
- raise ValueError(
93
- f"A value is already stored for observable '{callback.name}' at time {time}"
94
- )
95
-
96
- self._results[callback.name][time] = value
97
- self._default_aggregation_types[callback.name] = callback.default_aggregation_type
98
-
99
- def __getitem__(self, key: Any) -> Any:
100
- if isinstance(key, tuple):
101
- # results["energy", t]
102
- callback_name, time = key
103
-
104
- if callback_name not in self._results:
105
- raise ValueError(
106
- f"No value for observable '{callback_name}' has been stored"
107
- )
108
-
109
- if time not in self._results[callback_name]:
110
- raise ValueError(
111
- f"No value stored at time {time} for observable '{callback_name}'"
112
- )
113
-
114
- return self._results[callback_name][time]
115
-
116
- # results["energy"][t]
117
- assert isinstance(key, str)
118
- callback_name = key
119
- if callback_name not in self._results:
120
- raise ValueError(f"No value for observable '{callback_name}' has been stored")
121
-
122
- return self._results[key]
123
-
124
- def get_result_names(self) -> list[str]:
125
- """
126
- get a list of results present in this object
127
-
128
- Args:
129
-
130
- Returns:
131
- list of results by name
132
-
133
- """
134
- return list(self._results.keys())
135
-
136
- def get_result_times(self, name: str) -> list[int]:
137
- """
138
- get a list of times for which the given result has been stored
139
-
140
- Args:
141
- name: name of the result to get times of
142
-
143
- Returns:
144
- list of times in ns
145
-
146
- """
147
- return list(self._results[name].keys())
148
-
149
- def get_result(self, name: str, time: int) -> Any:
150
- """
151
- get the given result at the given time
152
-
153
- Args:
154
- name: name of the result to get
155
- time: time in ns at which to get the result
156
-
157
- Returns:
158
- the result
159
-
160
- """
161
- return self._results[name][time]
162
-
163
- def get_aggregation_type(self, name: str) -> Optional[AggregationType]:
164
- return self._default_aggregation_types[name]
165
-
166
- def dump(self, file_path: Path) -> None:
167
- with file_path.open("w") as file_handle:
168
- json.dump(
169
- {
170
- "observables": self._results,
171
- "statistics": self.statistics,
172
- },
173
- file_handle,
174
- )