emu-base 1.2.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- emu_base/__init__.py +47 -0
- emu_base/base_classes/__init__.py +31 -0
- emu_base/base_classes/aggregators.py +59 -0
- emu_base/base_classes/backend.py +48 -0
- emu_base/base_classes/callback.py +90 -0
- emu_base/base_classes/config.py +81 -0
- emu_base/base_classes/default_callbacks.py +300 -0
- emu_base/base_classes/operator.py +126 -0
- emu_base/base_classes/results.py +174 -0
- emu_base/base_classes/state.py +97 -0
- emu_base/lindblad_operators.py +44 -0
- emu_base/math/__init__.py +3 -0
- emu_base/math/brents_root_finding.py +121 -0
- emu_base/math/krylov_exp.py +127 -0
- emu_base/pulser_adapter.py +248 -0
- emu_base/utils.py +9 -0
- emu_base-1.2.1.dist-info/METADATA +134 -0
- emu_base-1.2.1.dist-info/RECORD +19 -0
- emu_base-1.2.1.dist-info/WHEEL +4 -0
emu_base/__init__.py
ADDED
@@ -0,0 +1,47 @@
from .base_classes.results import Results
from .base_classes.callback import Callback, AggregationType
from .base_classes.config import BackendConfig
from .base_classes.operator import Operator
from .base_classes.state import State
from .base_classes.backend import Backend
from .base_classes.default_callbacks import (
    BitStrings,
    CorrelationMatrix,
    Energy,
    EnergyVariance,
    Expectation,
    Fidelity,
    QubitDensity,
    StateResult,
    SecondMomentOfEnergy,
)
from .pulser_adapter import PulserData, HamiltonianType
from .math.brents_root_finding import find_root_brents
from .math.krylov_exp import krylov_exp, DEFAULT_MAX_KRYLOV_DIM

__all__ = [
    "__version__",
    "Results",
    "BackendConfig",
    "Operator",
    "State",
    "Backend",
    "AggregationType",
    "Callback",
    "StateResult",
    "BitStrings",
    "QubitDensity",
    "CorrelationMatrix",
    "Expectation",
    "Fidelity",
    "Energy",
    "EnergyVariance",
    "SecondMomentOfEnergy",
    "PulserData",
    "find_root_brents",
    "krylov_exp",
    "HamiltonianType",
    "DEFAULT_MAX_KRYLOV_DIM",
]

__version__ = "1.2.1"
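All of the names above are re-exported at the package top level, so downstream code can use the flat import surface instead of the `base_classes` paths. A minimal check, assuming the emu-base 1.2.1 wheel is installed (illustrative, not part of the wheel):

```python
# Illustrative only: exercising the public import surface re-exported by emu_base/__init__.py.
from emu_base import Backend, BackendConfig, BitStrings, Energy, krylov_exp, __version__

print(__version__)  # "1.2.1"
```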
emu_base/base_classes/__init__.py
ADDED
@@ -0,0 +1,31 @@
from .operator import Operator
from .state import State
from .results import Results
from .callback import Callback
from .default_callbacks import (
    StateResult,
    BitStrings,
    QubitDensity,
    CorrelationMatrix,
    Expectation,
    Fidelity,
    Energy,
    EnergyVariance,
    SecondMomentOfEnergy,
)

__all__ = [
    "Operator",
    "State",
    "Results",
    "Callback",
    "StateResult",
    "BitStrings",
    "QubitDensity",
    "CorrelationMatrix",
    "Expectation",
    "Fidelity",
    "Energy",
    "EnergyVariance",
    "SecondMomentOfEnergy",
]
emu_base/base_classes/aggregators.py
ADDED
@@ -0,0 +1,59 @@
import statistics
from typing import Any, Callable
import collections
from emu_base.base_classes.callback import AggregationType


_NUMERIC_TYPES = {int, float, complex}


def mean_aggregator(
    values: list[Any],
) -> complex | float | list[complex] | list[float] | list[list[complex]] | list[
    list[float]
]: # FIXME: support tuples?
    if values == []:
        raise ValueError("Cannot average 0 samples")

    element_type = type(values[0])

    if element_type in _NUMERIC_TYPES:
        return statistics.fmean(values)

    if element_type != list:
        raise NotImplementedError("Cannot average this type of data")

    if values[0] == []:
        raise ValueError("Cannot average list of empty lists")

    sub_element_type = type(values[0][0])

    if sub_element_type in _NUMERIC_TYPES:
        dim = len(values[0])
        return [statistics.fmean(value[i] for value in values) for i in range(dim)]

    if sub_element_type != list: # FIXME: ABC.Iterable? Collection? subclass?
        raise ValueError(f"Cannot average list of lists of {sub_element_type}")

    if values[0][0] == []:
        raise ValueError("Cannot average list of matrices with no columns")

    if (sub_sub_element_type := type(values[0][0][0])) not in _NUMERIC_TYPES:
        raise ValueError(f"Cannot average list of matrices of {sub_sub_element_type}")

    dim1 = len(values[0])
    dim2 = len(values[0][0])
    return [
        [statistics.fmean(value[i][j] for value in values) for j in range(dim2)]
        for i in range(dim1)
    ]


def bag_union_aggregator(values: list[collections.Counter]) -> collections.Counter:
    return sum(values, start=collections.Counter())


aggregation_types_definitions: dict[AggregationType, Callable] = {
    AggregationType.MEAN: mean_aggregator,
    AggregationType.BAG_UNION: bag_union_aggregator,
}
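To make the type dispatch above concrete, a short usage sketch (illustrative, assuming the wheel is installed; not part of the package):

```python
# Illustrative usage of the aggregators defined above.
from collections import Counter

from emu_base.base_classes.aggregators import bag_union_aggregator, mean_aggregator

assert mean_aggregator([1.0, 2.0, 3.0]) == 2.0                  # scalars -> statistics.fmean
assert mean_aggregator([[1.0, 2.0], [3.0, 4.0]]) == [2.0, 3.0]  # lists -> element-wise mean
assert mean_aggregator([[[1.0]], [[3.0]]]) == [[2.0]]           # matrices -> entry-wise mean

# Counters (e.g. bitstring histograms from independent runs) are merged by multiset union.
merged = bag_union_aggregator([Counter({"01": 2}), Counter({"01": 1, "10": 5})])
assert merged == Counter({"01": 3, "10": 5})
```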
emu_base/base_classes/backend.py
ADDED
@@ -0,0 +1,48 @@
import warnings
from abc import ABC, abstractmethod

from pulser import Sequence

from emu_base.base_classes.config import BackendConfig
from emu_base.base_classes.results import Results


class Backend(ABC):
    """
    Base class for different emulation backends.
    Forces backends to implement a run method.
    """

    @staticmethod
    def validate_sequence(sequence: Sequence) -> None:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)

            if not isinstance(sequence, Sequence):
                raise TypeError(
                    "The provided sequence has to be a valid " "pulser.Sequence instance."
                )
            if sequence.is_parametrized() or sequence.is_register_mappable():
                raise ValueError(
                    "Not supported"
                    "The provided sequence needs to be built to be simulated. Call"
                    " `Sequence.build()` with the necessary parameters."
                )
            if not sequence._schedule:
                raise ValueError("The provided sequence has no declared channels.")
            if all(sequence._schedule[x][-1].tf == 0 for x in sequence.declared_channels):
                raise ValueError("No instructions given for the channels in the sequence.")

    @abstractmethod
    def run(self, sequence: Sequence, config: BackendConfig) -> Results:
        """
        Emulates the given sequence.

        Args:
            sequence: a Pulser sequence to simulate
            config: the config. Should be of the appropriate type for the backend

        Returns:
            the simulation results
        """
        pass
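For orientation, `validate_sequence` can be exercised directly against an ordinary pulser sequence. The sketch below uses standard pulser objects (`Register`, `Sequence`, `MockDevice`, `Pulse`), which are not part of this diff:

```python
# Illustrative only: checking a hand-built pulser sequence with Backend.validate_sequence.
from pulser import Pulse, Register, Sequence
from pulser.devices import MockDevice

from emu_base import Backend

reg = Register.from_coordinates([(0.0, 0.0), (0.0, 5.0)], prefix="q")
seq = Sequence(reg, MockDevice)
seq.declare_channel("rydberg", "rydberg_global")
# ConstantPulse args: duration (ns), amplitude, detuning, phase.
seq.add(Pulse.ConstantPulse(100, 1.0, 0.0, 0.0), "rydberg")

# Raises TypeError/ValueError for parametrized, register-mappable or empty sequences;
# passes silently for this built, non-empty sequence.
Backend.validate_sequence(seq)
```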
emu_base/base_classes/callback.py
ADDED
@@ -0,0 +1,90 @@
from abc import ABC, abstractmethod
from typing import Any, Optional, TYPE_CHECKING
from enum import Enum, auto

from emu_base.base_classes.config import BackendConfig
from emu_base.base_classes.operator import Operator
from emu_base.base_classes.state import State

if TYPE_CHECKING:
    from emu_base.base_classes.results import Results


class AggregationType(Enum):
    """
    Defines how to combine multiple values from different simulation results.
    """

    MEAN = auto() # statistics.fmean or list/matrix-wise equivalent
    BAG_UNION = auto() # Counter.__add__


class Callback(ABC):
    def __init__(self, evaluation_times: set[int]):
        """
        The callback base class that can be subclassed to add new kinds of results
        to the Results object returned by the Backend

        Args:
            evaluation_times: the times at which to add a result to Results
        """
        self.evaluation_times = evaluation_times

    def __call__(
        self, config: BackendConfig, t: int, state: State, H: Operator, result: "Results"
    ) -> None:
        """
        This function is called after each time step performed by the emulator.
        By default it calls apply to compute a result and put it in `result`
        if `t` in `self.evaluation_times`.
        It can be overloaded to define any custom behaviour for a `Callback`.

        Args:
            config: the config object passed to the run method
            t: the current time in ns
            state: the current state
            H: the Hamiltonian at this time
            result: the results object
        """
        if t in self.evaluation_times:
            value_to_store = self.apply(config, t, state, H)
            result.store(callback=self, time=t, value=value_to_store)

    @property
    @abstractmethod
    def name(self) -> str:
        """
        The name of the observable, can be used to index into the Results object.
        Some Callbacks might have multiple instances, such as a callback to compute
        a fidelity on some given state. In that case, this method could make sure
        each instance has a unique name.

        Returns:
            the name of the callback
        """
        pass

    @abstractmethod
    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        """
        This method must be implemented by subclasses. The result of this method
        gets put in the Results object.

        Args:
            config: the config object passed to the run method
            t: the current time in ns
            state: the current state
            H: the Hamiltonian at this time

        Returns:
            the result to put in Results
        """
        pass

    @property
    def default_aggregation_type(self) -> Optional[AggregationType]:
        """
        Defines how to combine by default multiple values from different simulation results.
        None means no default, therefore aggregator function is always user-provided.
        """
        return None
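Because `Callback.__call__` already handles the time filtering and storage, a new observable only needs `name` and `apply`. A minimal sketch of a custom subclass (illustrative, not part of the package; the `norm()` call on the state is a hypothetical backend-specific method, not something defined in this diff):

```python
# Illustrative sketch of a custom Callback subclass.
from typing import Any

from emu_base import BackendConfig, Callback, Operator, State


class StateNorm(Callback):
    # A plain class attribute satisfies the abstract `name` property,
    # exactly as the default callbacks below do.
    name = "state_norm"

    def __init__(self, evaluation_times: set[int]):
        super().__init__(evaluation_times)

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        # Whatever is returned here is stored in Results by Callback.__call__.
        return state.norm()  # hypothetical method on the backend's State type


norm_callback = StateNorm({100, 200, 400})  # evaluate at these times (ns)
```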
emu_base/base_classes/config.py
ADDED
@@ -0,0 +1,81 @@
from __future__ import annotations
from pulser.noise_model import NoiseModel
import logging
import sys
import pathlib


class BackendConfig:
    """The base backend configuration.

    Args:
        observables: a list of callbacks to compute observables
        with_modulation: if True, run the sequence with hardware modulation
        noise_model: The pulser.NoiseModel to use in the simulation.
        interaction_matrix: When specified, override the interaction terms in the Hamiltonian.
            This corresponds to the $U_{ij}$ terms in the documentation. Must be symmetric.
        interaction_cutoff: set interaction coefficients smaller than this to 0.
            This can improve the memory profile of the application for some backends.
        log_level: The output verbosity. Should be one of the constants from logging.
        log_file: a path to a file where to store the log, instead of printing to stdout

    Examples:
        >>> observables = [BitStrings(400, 100)] #compute 100 bitstrings at 400ns
        >>> noise_model = pulser.noise_model.NoiseModel()
        >>> interaction_matrix = [[1 for _ in range(nqubits)] for _ in range(nqubits)]
        >>> interaction_cutoff = 2.0 #this will turn off all the above interactions again
        >>> log_level = logging.warn
    """

    def __init__(
        self,
        *,
        # "Callback" is a forward type reference because of the circular import otherwise.
        observables: list["Callback"] | None = None, # type: ignore # noqa: F821
        with_modulation: bool = False,
        noise_model: NoiseModel = None,
        interaction_matrix: list[list[float]] | None = None,
        interaction_cutoff: float = 0.0,
        log_level: int = logging.INFO,
        log_file: pathlib.Path | None = None,
    ):
        if observables is None:
            observables = []
        self.callbacks = (
            observables # we can add other types of callbacks, and just stack them
        )
        self.with_modulation = with_modulation
        self.noise_model = noise_model

        if interaction_matrix is not None and (
            not isinstance(interaction_matrix, list)
            or not isinstance(interaction_matrix[0], list)
        ):
            raise ValueError(
                "Interaction matrix must be provided as a Python list of lists of floats"
            )

        self.interaction_matrix = interaction_matrix
        self.interaction_cutoff = interaction_cutoff
        self.logger = logging.getLogger("global_logger")
        if log_file is None:
            logging.basicConfig(
                level=log_level, format="%(message)s", stream=sys.stdout, force=True
            ) # default to stream = sys.stderr
        else:
            logging.basicConfig(
                level=log_level,
                format="%(message)s",
                filename=str(log_file),
                filemode="w",
                force=True,
            )
        if noise_model is not None and (
            noise_model.runs != 1
            or noise_model.samples_per_run != 1
            or noise_model.runs is not None
            or noise_model.samples_per_run is not None
        ):
            self.logger.warning(
                "Warning: The runs and samples_per_run values of the NoiseModel are ignored!"
            )
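A minimal configuration example (illustrative; the log file path is hypothetical). The observables list is left empty here; the callbacks it would normally hold are the ones defined in default_callbacks.py below:

```python
# Illustrative only: constructing a BackendConfig without observables.
import logging
import pathlib

from emu_base import BackendConfig

nqubits = 3
interaction_matrix = [[0.0 if i == j else 1.0 for j in range(nqubits)] for i in range(nqubits)]

config = BackendConfig(
    interaction_matrix=interaction_matrix,   # overrides the U_ij terms
    interaction_cutoff=0.5,                  # coefficients below this are set to 0
    log_level=logging.DEBUG,
    log_file=pathlib.Path("emulation.log"),  # hypothetical path
)
assert config.callbacks == []  # observables default to an empty list
```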
emu_base/base_classes/default_callbacks.py
ADDED
@@ -0,0 +1,300 @@
from copy import deepcopy
from typing import Any

from emu_base.base_classes.callback import Callback, AggregationType
from emu_base.base_classes.config import BackendConfig
from emu_base.base_classes.operator import Operator
from emu_base.base_classes.state import State


class StateResult(Callback):
    """
    Store the quantum state in whatever format the backend provides

    Args:
        evaluation_times: the times at which to store the state
    """

    def __init__(self, evaluation_times: set[int]):
        super().__init__(evaluation_times)

    name = "state"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        return deepcopy(state)


class BitStrings(Callback):
    """
    Store bitstrings sampled from the current state. Error rates are taken from the config
    passed to the run method of the backend. The bitstrings are stored as a Counter[str].

    Args:
        evaluation_times: the times at which to sample bitstrings
        num_shots: how many bitstrings to sample each time this observable is computed
    """

    def __init__(self, evaluation_times: set[int], num_shots: int = 1000):
        super().__init__(evaluation_times)
        self.num_shots = num_shots

    name = "bitstrings"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        p_false_pos = (
            0.0 if config.noise_model is None else config.noise_model.p_false_pos
        )
        p_false_neg = (
            0.0 if config.noise_model is None else config.noise_model.p_false_neg
        )

        return state.sample(self.num_shots, p_false_pos, p_false_neg)

    default_aggregation_type = AggregationType.BAG_UNION


_fidelity_counter = -1


class Fidelity(Callback):
    """
    Store $<ψ|φ(t)>$ for the given state $|ψ>$,
    and the state $|φ(t)>$ obtained by time evolution.

    Args:
        evaluation_times: the times at which to compute the fidelity
        state: the state |ψ>. Note that this must be of appropriate type for the backend

    Examples:
        >>> state = State.from_state_string(...) #see State API
        >>> fidelity = Fidelity([400], state) #measure fidelity on state at t=400ns
    """

    def __init__(self, evaluation_times: set[int], state: State):
        super().__init__(evaluation_times)
        global _fidelity_counter
        _fidelity_counter += 1
        self.index = _fidelity_counter
        self.state = state

    @property
    def name(self) -> str:
        return f"fidelity_{self.index}"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        return self.state.inner(state)


_expectation_counter = -1


class Expectation(Callback):
    """
    Store the expectation of the given operator on the current state
    (i.e. $\\langle φ(t)|\\mathrm{operator}|φ(t)\\rangle$).

    Args:
        evaluation_times: the times at which to compute the expectation
        operator: the operator to measure. Must be of appropriate type for the backend.

    Examples:
        >>> op = Operator.from_operator_string(...) #see Operator API
        >>> expectation = Expectation([400], op) #measure the expecation of op at t=400ns
    """

    def __init__(self, evaluation_times: set[int], operator: Operator):
        super().__init__(evaluation_times)
        global _expectation_counter
        _expectation_counter += 1
        self.index = _expectation_counter
        self.operator = operator

    @property
    def name(self) -> str:
        return f"expectation_{self.index}"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        return self.operator.expect(state)

    default_aggregation_type = AggregationType.MEAN


class CorrelationMatrix(Callback):
    """
    Store the correlation matrix for the current state.
    Requires specification of the basis used in the emulation
    https://pulser.readthedocs.io/en/stable/conventions.html
    It currently supports
    - the rydberg basis ('r','g')
    - the xy basis ('0', '1')
    and returns

    `[[<φ(t)|n_i n_j|φ(t)> for j in qubits] for i in qubits]`

    n_i being the operator that projects qubit i onto the state that measures as 1.
    The diagonal of this matrix is the QubitDensity. The correlation matrix
    is stored as a list of lists.

    Args:
        evaluation_times: the times at which to compute the correlation matrix
        basis: the basis used by the sequence
        nqubits: the number of qubits in the Register

    Notes:
        See the API for `Operator.from_operator_string` for an example of what to do with
        basis and nqubits.
    """

    def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
        super().__init__(evaluation_times)
        self.operators: list[list[Operator]] | None = None
        self.basis = set(basis)
        if self.basis == {"r", "g"}:
            self.op_string = "rr"
        elif self.basis == {"0", "1"}:
            self.op_string = "11"
        else:
            raise ValueError("Unsupported basis provided")
        self.nqubits = nqubits

    name = "correlation_matrix"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        if hasattr(state, "get_correlation_matrix") and callable(
            state.get_correlation_matrix
        ):
            return state.get_correlation_matrix()

        if self.operators is None or not isinstance(self.operators[0], type(H)):
            self.operators = [
                [
                    H.from_operator_string(
                        self.basis,
                        self.nqubits,
                        [(1.0, [({self.op_string: 1.0}, list({i, j}))])],
                    )
                    for j in range(self.nqubits)
                ]
                for i in range(self.nqubits)
            ]
        return [[op.expect(state).real for op in ops] for ops in self.operators]

    default_aggregation_type = AggregationType.MEAN


class QubitDensity(Callback):
    """
    Requires specification of the basis used in the emulation
    https://pulser.readthedocs.io/en/stable/conventions.html
    It currently supports
    - the rydberg basis ('r','g')
    - the xy basis ('0', '1')
    and returns

    `[<φ(t)|n_i|φ(t)> for i in qubits]`

    n_i being the operator that projects qubit i onto the state that measures as 1.
    The qubit density is stored as a list.

    Args:
        evaluation_times: the times at which to compute the density
        basis: the basis used by the sequence
        nqubits: the number of qubits in the Register

    Notes:
        See the API for `State.from_state_string` for an example of what to do with
        basis and nqubits.
    """

    def __init__(self, evaluation_times: set[int], basis: tuple[str, ...], nqubits: int):
        super().__init__(evaluation_times)
        self.operators: list[Operator] | None = None
        self.basis = set(basis)
        if self.basis == {"r", "g"}:
            self.op_string = "rr"
        elif self.basis == {"0", "1"}:
            self.op_string = "11"
        else:
            raise ValueError("Unsupported basis provided")
        self.nqubits = nqubits

    name = "qubit_density"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        if self.operators is None or not isinstance(self.operators[0], type(H)):
            self.operators = [
                H.from_operator_string(
                    self.basis, self.nqubits, [(1.0, [({self.op_string: 1.0}, [i])])]
                )
                for i in range(self.nqubits)
            ]
        return [op.expect(state).real for op in self.operators]

    default_aggregation_type = AggregationType.MEAN


class Energy(Callback):
    """
    Store the expectation value of the current Hamiltonian
    (i.e. $\\langle φ(t)|H(t)|φ(t) \\rangle$)

    Args:
        evaluation_times: the times at which to compute the expectation
    """

    def __init__(self, evaluation_times: set[int]):
        super().__init__(evaluation_times)

    name = "energy"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        return H.expect(state).real

    default_aggregation_type = AggregationType.MEAN


class EnergyVariance(Callback):
    """
    Store the variance of the current Hamiltonian
    (i.e. $\\langle φ(t)|H(t)^2|φ(t)\\rangle - \\langle φ(t)|H(t)|φ(t)\\rangle^2$)

    Args:
        evaluation_times: the times at which to compute the variance
    """

    def __init__(self, evaluation_times: set[int]):
        super().__init__(evaluation_times)

    name = "energy_variance"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        h_squared = H @ H
        return h_squared.expect(state).real - H.expect(state).real ** 2

    # Explicitely setting this to None out of safety: in the case of MonteCarlo,
    # the aggregated variance cannot be computed from this callback.
    # Instead, one first need to average Energy and SecondMomentOfEnergy,
    # and then compute the variance with the formula:
    # AggregatedEnergyVariance = AveragedSecondMomentOfEnergy - AveragedEnergy**2
    default_aggregation_type = None


class SecondMomentOfEnergy(Callback):
    """
    Store the expectation value $\\langle φ(t)|H(t)^2|φ(t)\\rangle$.
    Useful for computing the variance when averaging over many executions of the program.

    Args:
        evaluation_times: the times at which to compute the variance
    """

    def __init__(self, evaluation_times: set[int]):
        super().__init__(evaluation_times)

    name = "second_moment_of_energy"

    def apply(self, config: BackendConfig, t: int, state: State, H: Operator) -> Any:
        h_squared = H @ H
        return h_squared.expect(state).real

    default_aggregation_type = AggregationType.MEAN
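Putting the pieces together, the callbacks above are what a user passes as `observables` to `BackendConfig`, and each one advertises how repeated noisy runs should be aggregated. A short assembly sketch (illustrative, not part of the package):

```python
# Illustrative only: wiring default callbacks into a BackendConfig.
from emu_base import AggregationType, BackendConfig, BitStrings, CorrelationMatrix, Energy
from emu_base.base_classes.aggregators import aggregation_types_definitions

times = {100, 200, 400}  # evaluation times in ns
config = BackendConfig(
    observables=[
        BitStrings(times, num_shots=500),
        CorrelationMatrix(times, basis=("r", "g"), nqubits=4),
        Energy(times),
    ],
)

# Each callback declares its default aggregation; BitStrings uses BAG_UNION,
# which maps to bag_union_aggregator in aggregators.py.
assert config.callbacks[0].default_aggregation_type is AggregationType.BAG_UNION
merge = aggregation_types_definitions[AggregationType.BAG_UNION]  # ready to apply to per-run Counters
```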