iqm_benchmarks-1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iqm/benchmarks/__init__.py +31 -0
- iqm/benchmarks/benchmark.py +109 -0
- iqm/benchmarks/benchmark_definition.py +264 -0
- iqm/benchmarks/benchmark_experiment.py +163 -0
- iqm/benchmarks/compressive_gst/__init__.py +20 -0
- iqm/benchmarks/compressive_gst/compressive_gst.py +1029 -0
- iqm/benchmarks/entanglement/__init__.py +18 -0
- iqm/benchmarks/entanglement/ghz.py +802 -0
- iqm/benchmarks/logging_config.py +29 -0
- iqm/benchmarks/optimization/__init__.py +18 -0
- iqm/benchmarks/optimization/qscore.py +719 -0
- iqm/benchmarks/quantum_volume/__init__.py +21 -0
- iqm/benchmarks/quantum_volume/clops.py +726 -0
- iqm/benchmarks/quantum_volume/quantum_volume.py +854 -0
- iqm/benchmarks/randomized_benchmarking/__init__.py +18 -0
- iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +386 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +555 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +19 -0
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +810 -0
- iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +86 -0
- iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +892 -0
- iqm/benchmarks/readout_mitigation.py +290 -0
- iqm/benchmarks/utils.py +521 -0
- iqm_benchmarks-1.3.dist-info/LICENSE +205 -0
- iqm_benchmarks-1.3.dist-info/METADATA +190 -0
- iqm_benchmarks-1.3.dist-info/RECORD +42 -0
- iqm_benchmarks-1.3.dist-info/WHEEL +5 -0
- iqm_benchmarks-1.3.dist-info/top_level.txt +2 -0
- mGST/LICENSE +21 -0
- mGST/README.md +54 -0
- mGST/additional_fns.py +962 -0
- mGST/algorithm.py +733 -0
- mGST/compatibility.py +238 -0
- mGST/low_level_jit.py +694 -0
- mGST/optimization.py +349 -0
- mGST/qiskit_interface.py +282 -0
- mGST/reporting/figure_gen.py +334 -0
- mGST/reporting/reporting.py +710 -0
@@ -0,0 +1,31 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+IQM's Python library benchmarking suite for QCVV (quantum characterization, verification, and validation).
+"""
+
+from importlib.metadata import PackageNotFoundError, version
+
+from .benchmark_definition import AnalysisResult, Benchmark, RunResult
+
+
+try:
+    # Change here if project is renamed and does not equal the package name
+    dist_name = "iqm-benchmarks"
+    __version__ = version(dist_name)
+except PackageNotFoundError:  # pragma: no cover
+    __version__ = "unknown"
+finally:
+    del version, PackageNotFoundError
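The `try`/`except`/`finally` block above resolves `__version__` from the installed distribution's metadata and degrades gracefully when the package is not installed. A minimal standalone sketch of the same pattern (the distribution name below is a deliberate placeholder, not part of this package):

```python
from importlib.metadata import PackageNotFoundError, version

try:
    # Placeholder distribution name, assumed not to be installed.
    v = version("some-nonexistent-dist")
except PackageNotFoundError:
    v = "unknown"

print(v)  # -> "unknown" unless such a distribution happens to be installed
```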
@@ -0,0 +1,109 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generic Benchmark class
+"""
+
+from abc import ABC, abstractmethod
+from copy import deepcopy
+from typing import Dict, Literal, Optional, OrderedDict, Type
+
+from matplotlib.figure import Figure
+from pydantic import BaseModel
+
+from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
+
+
+class BenchmarkBase(ABC):
+    """
+    The base implementation of all benchmarks, from which they inherit.
+    """
+
+    def __init__(
+        self,
+        backend: IQMBackendBase,
+        configuration: "BenchmarkConfigurationBase",
+    ):
+        """Construct the BenchmarkBase class.
+
+        Args:
+            backend (IQMBackendBase): the backend to execute the benchmark on
+            configuration (BenchmarkConfigurationBase): the configuration of the benchmark
+        """
+        self.backend = backend
+        self.configuration = configuration
+        self.serializable_configuration = deepcopy(self.configuration)
+        self.serializable_configuration.benchmark = self.name()
+
+        self.shots = self.configuration.shots
+        self.calset_id = self.configuration.calset_id
+        self.max_gates_per_batch = self.configuration.max_gates_per_batch
+
+        self.routing_method = self.configuration.routing_method
+        self.physical_layout = self.configuration.physical_layout
+
+        self.raw_data: Dict = {}
+        self.job_meta: Dict = {}
+        self.results: Dict = {}
+        self.raw_results: Dict = {}
+
+        self.untranspiled_circuits: Dict[str, Dict[int, list]] = {}
+        self.transpiled_circuits: Dict[str, Dict[int, list]] = {}
+
+        self.figures: Dict[str, Figure] = {}
+
+    @classmethod
+    @abstractmethod
+    def name(cls):
+        """Return the name of the benchmark."""
+
+    @abstractmethod
+    def execute_full_benchmark(self):
+        """Execute the full benchmark on the given backend."""
+
+    @staticmethod
+    def check_requirements(all_benchmarks: OrderedDict[str, "BenchmarkBase"]) -> OrderedDict[str, "BenchmarkBase"]:
+        """Check whether the requirements for the benchmark are met, returning a valid benchmark dictionary."""
+        return all_benchmarks
+
+    def generate_requirements(self, all_benchmarks: OrderedDict[str, "BenchmarkBase"]) -> None:
+        """Generate the required attributes for execution."""
+
+
+class BenchmarkConfigurationBase(BaseModel):
+    """Benchmark configuration base.
+
+    Attributes:
+        benchmark (Type[BenchmarkBase]): the benchmark class to which this configuration applies.
+        shots (int): the number of shots to use in circuit execution.
+            * Default for all benchmarks is 2**8.
+        max_gates_per_batch (Optional[int]): the maximum number of gates per circuit batch.
+            * Default for all benchmarks is None.
+        calset_id (Optional[str]): the calibration set ID to use in circuit execution.
+            * Default for all benchmarks is None (uses the latest available calibration set).
+        routing_method (Literal["basic", "lookahead", "stochastic", "sabre", "none"]): the Qiskit routing method to use in transpilation.
+            * Default for all benchmarks is "sabre".
+        physical_layout (Literal["fixed", "batching"]): how the physical layout is handled during transpilation.
+            - "fixed": the physical layout is constrained to the selected initial physical qubits.
+            - "batching": the physical layout may use any other physical qubits, and circuits are batched according to the final measured qubits.
+            * Default for all benchmarks is "fixed".
+    """
+
+    benchmark: Type[BenchmarkBase]
+    shots: int = 2**8
+    max_gates_per_batch: Optional[int] = None
+    calset_id: Optional[str] = None
+    routing_method: Literal["basic", "lookahead", "stochastic", "sabre", "none"] = "sabre"
+    physical_layout: Literal["fixed", "batching"] = "fixed"
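`BenchmarkConfigurationBase` is a pydantic model, so concrete configurations are validated on construction and inherit the defaults listed above. A hedged sketch of wiring a benchmark class into a configuration (`NoOpBenchmark` is an illustrative stand-in, not a class shipped in this package):

```python
from iqm.benchmarks.benchmark import BenchmarkBase, BenchmarkConfigurationBase


class NoOpBenchmark(BenchmarkBase):
    """Illustrative benchmark that does nothing; not part of iqm-benchmarks."""

    @classmethod
    def name(cls):
        return "noop_benchmark"

    def execute_full_benchmark(self):
        self.results = {"status": "ok"}


config = BenchmarkConfigurationBase(benchmark=NoOpBenchmark, shots=2**10)
print(config.shots)            # 1024, overriding the 2**8 default
print(config.routing_method)   # "sabre"
print(config.physical_layout)  # "fixed"
```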
@@ -0,0 +1,264 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module implements the base class for defining a benchmark.
+"""
+
+from abc import ABC, abstractmethod
+import copy
+from copy import deepcopy
+from dataclasses import dataclass, field
+import functools
+from typing import Any, Dict, List, Union
+import uuid
+
+from matplotlib.figure import Figure
+import matplotlib.pyplot as plt
+import xarray as xr
+
+from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
+from iqm.benchmarks.utils import get_iqm_backend, timeit
+from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
+from iqm.qiskit_iqm.iqm_provider import IQMBackend, IQMFacadeBackend
+
+
+@dataclass
+class RunResult:
+    """
+    A dataclass that stores the results of a single run of a Benchmark.
+
+    RunResult should contain enough information that the Benchmark can be analyzed based on those
+    results.
+    """
+
+    dataset: xr.Dataset
+
+
+@dataclass
+class AnalysisResult:
+    """
+    A dataclass storing the results of the analysis.
+
+    The result consists of a dataset, plots, and observations. Plots are defined as a dictionary
+    that maps a plot name to a figure. Observations are key-value pairs of data that contain the
+    main results of the benchmark.
+    """
+
+    dataset: xr.Dataset
+    plots: dict[str, Figure] = field(default_factory=dict)
+    observations: dict[str, Any] = field(default_factory=dict)
+
+    def plot(self, plot_name: str):
+        """
+        Returns the figure stored under the given name.
+
+        Args:
+            plot_name: Name of the figure to return.
+        """
+        return self.plots[plot_name]
+
+    def plot_all(self):
+        """
+        Plots all the figures defined for the analysis.
+        """
+        for fig in self.plots.values():
+            show_figure(fig)
+        plt.show()
+
+    @classmethod
+    def from_run_result(cls, run: RunResult):
+        """
+        Creates a new ``AnalysisResult`` from a ``RunResult``.
+
+        Args:
+            run: The run for which the analysis result is created.
+        """
+        return cls(dataset=run.dataset)
+
+
+def default_analysis_function(result: AnalysisResult) -> AnalysisResult:
+    """
+    The default analysis, which only passes the result through.
+    """
+    return result
+
+
+def merge_datasets_dac(datasets: List[xr.Dataset]) -> xr.Dataset:
+    """
+    Merges a list of datasets recursively to minimize dataset sizes during merge.
+    Args:
+        datasets: List[xr.Dataset]
+            A list of xarray datasets
+    Returns:
+        The single merged dataset
+    """
+    if len(datasets) == 1:
+        return datasets[0]
+    datasets_new = []
+    for i in range(0, len(datasets), 2):
+        if i == (len(datasets) - 1):
+            datasets_new.append(datasets[i])
+        else:
+            datasets_new.append(xr.merge(datasets[i : i + 2]))
+    return merge_datasets_dac(datasets_new)
+
+
+@timeit
+def add_counts_to_dataset(counts: List[Dict[str, int]], identifier: str, dataset: xr.Dataset):
+    """Adds the counts from a cortex job result to the given dataset.
+    If counts with the same identifier are already present in the old dataset, both counts are added together.
+
+    Args:
+        counts (List[Dict[str, int]]): A list of dictionaries with counts of bitstrings.
+        identifier (str): A string to identify the current data, for instance the qubit layout.
+        dataset (xr.Dataset): Dataset to add results to.
+    Returns:
+        dataset_merged: xarray.Dataset
+            A merged dataset where the new counts are added to the input dataset
+    """
+    if not isinstance(counts, list):
+        counts = [counts]
+    datasets = []
+    for ii, _ in enumerate(counts):
+        ds_temp = xr.Dataset()
+        counts_dict = dict(counts[ii])
+        counts_array = xr.DataArray(list(counts_dict.values()), {f"{identifier}_state_{ii}": list(counts_dict.keys())})
+        if f"{identifier}_counts_{ii}" in dataset.variables:
+            counts_array = counts_array + dataset[f"{identifier}_counts_{ii}"]
+        counts_array = counts_array.sortby(counts_array[f"{identifier}_state_{ii}"])
+        ds_temp.update({f"{identifier}_counts_{ii}": counts_array})
+        datasets.append(ds_temp)
+    dataset_new = merge_datasets_dac(datasets)
+    dataset_merged = dataset.merge(dataset_new, compat="override")
+    return dataset_merged
+
+
+def show_figure(fig):
+    """
+    Shows a closed figure.
+
+    Args:
+        fig: Figure to show.
+    """
+    dummy = plt.figure()
+    new_manager = dummy.canvas.manager
+    new_manager.canvas.figure = fig
+    fig.set_canvas(new_manager.canvas)
+
+
+class Benchmark(ABC):
+    """
+    A base class for running cortex-based Benchmark experiments.
+
+    A new benchmark is defined by deriving from this base class and
+    implementing the ``execute`` method. Additionally, a custom analysis
+    for the benchmark can be implemented by assigning the analysis
+    method to the ``analysis_function`` field. The given analysis function
+    should accept an ``AnalysisResult`` as its input and return the final
+    result.
+    """
+
+    analysis_function = staticmethod(default_analysis_function)
+    default_options: dict[str, Any] | None = None
+    options: dict[str, Any] | None = None
+    # name: str = "unnamed_benchmark"
+
+    def __init__(self, backend: Union[str, IQMBackendBase], configuration: "BenchmarkConfigurationBase", **kwargs):
+
+        # Ported from BenchmarkBase # CHECK
+        self.configuration = configuration
+        self.serializable_configuration = deepcopy(self.configuration)
+        self.serializable_configuration.benchmark = self.name()
+
+        if isinstance(backend, str):
+            self.backend = get_iqm_backend(backend)
+        else:
+            assert isinstance(backend, IQMBackendBase)
+            self.backend = backend
+
+        self.shots = self.configuration.shots
+        self.calset_id = self.configuration.calset_id
+        self.max_gates_per_batch = self.configuration.max_gates_per_batch
+
+        self.routing_method = self.configuration.routing_method
+        self.physical_layout = self.configuration.physical_layout
+
+        self.untranspiled_circuits: Dict[str, Dict[int, list]] = {}
+        self.transpiled_circuits: Dict[str, Dict[int, list]] = {}
+
+        # From exa_support MR
+        self.options = copy.copy(self.default_options) if self.default_options else {}
+        self.options.update(kwargs)
+        self.runs: list[RunResult] = []
+
+    @classmethod
+    @abstractmethod
+    def name(cls):
+        """Return the name of the benchmark."""
+
+    @abstractmethod
+    def execute(self, backend: IQMBackend | IQMFacadeBackend | str) -> xr.Dataset:
+        """
+        Executes the benchmark.
+
+        This method should be overridden by deriving classes.
+
+        Args:
+            backend: Qiskit backend used to execute benchmarks.
+
+        Returns:
+            An xarray dataset which contains the results of the benchmark execution.
+            The dataset should contain all the information necessary for analyzing
+            the benchmark results.
+        """
+
+    def run(self, calibration_set_id: str | uuid.UUID | None = None) -> RunResult:
+        """
+        Runs the benchmark using the given backend and calibration_set_id.
+
+        Args:
+            calibration_set_id:
+                Calibration set ID used to initialize the backend, or None for the latest calibration set.
+
+        Returns:
+            RunResult: The result of the benchmark run.
+        """
+        backend_for_execute = copy.copy(self.backend)
+        backend_for_execute.run = functools.partial(self.backend.run, calibration_set_id=calibration_set_id)  # type: ignore
+        dataset = self.execute(backend_for_execute)
+        run = RunResult(dataset)
+        self.runs.append(run)
+        return run
+
+    def analyze(self, run_index=-1) -> AnalysisResult:
+        """
+        The default analysis for the benchmark.
+
+        Internally uses the function defined by the attribute ``analysis_function``
+        to perform the analysis. This function makes a shallow copy of the dataset produced by
+        ``run``, so it is recommended not to change the data of the dataset, only
+        the structure of the array.
+
+        Args:
+            run_index: Index for the run to analyze.
+
+        Returns:
+            An analysis result constructed from the run and updated by the analysis method defined by
+            the ``analysis_function`` field.
+        """
+        run = self.runs[run_index]
+        result = AnalysisResult.from_run_result(run)
+        updated_result = self.analysis_function(result)
+        return updated_result
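`merge_datasets_dac` halves the list of datasets on each pass, so a batch of n count datasets is combined in about log2(n) rounds of pairwise merges instead of one long accumulating merge; `add_counts_to_dataset` relies on this when folding per-circuit count arrays into the run dataset. A small self-contained sketch of the merge on toy data:

```python
import xarray as xr

from iqm.benchmarks.benchmark_definition import merge_datasets_dac

# Five single-variable datasets collapse via pairwise merges: 5 -> 3 -> 2 -> 1.
datasets = [xr.Dataset({f"var_{i}": xr.DataArray([i])}) for i in range(5)]
merged = merge_datasets_dac(datasets)
print(sorted(merged.data_vars))  # ['var_0', 'var_1', 'var_2', 'var_3', 'var_4']
```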
@@ -0,0 +1,163 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generic Benchmark Experiment class
+"""
+
+from copy import deepcopy
+from json import dump
+from pathlib import Path
+import pickle
+from time import strftime
+from typing import List, Optional, OrderedDict, Union
+
+from iqm.benchmarks.benchmark import BenchmarkBase, BenchmarkConfigurationBase
+from iqm.benchmarks.utils import get_iqm_backend
+from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
+
+
+class BenchmarkExperiment:
+    """
+    A Benchmark Experiment wraps the execution of one or more benchmarks, checks that their requirements are met,
+    stores the execution results, and saves the relevant figures.
+    """
+
+    def __init__(
+        self,
+        backend: Union[str, IQMBackendBase],
+        benchmark_configurations: List[BenchmarkConfigurationBase],
+        device_id: Optional[str] = None,
+    ):
+        """Construct the BenchmarkExperiment class.
+
+        Args:
+            backend (str | IQMBackendBase): the backend to execute the benchmarks on
+            benchmark_configurations (List[BenchmarkConfigurationBase]): the configuration(s) of the benchmark(s)
+            device_id (Optional[str], optional): the identifier of the device. Defaults to None.
+
+        Raises:
+            ValueError: backend not supported. Try 'garnet' or 'iqmfakeadonis'
+        """
+        self.timestamp = strftime("%Y%m%d-%H%M%S")
+
+        self.device_id = device_id if device_id is not None else backend
+
+        if isinstance(backend, str):
+            self.backend = get_iqm_backend(backend)
+
+        else:
+            assert isinstance(backend, IQMBackendBase)
+            self.backend = backend
+
+        benchmarks: OrderedDict[str, BenchmarkBase] = OrderedDict(
+            (config.benchmark.name(), config.benchmark(self.backend, config)) for config in benchmark_configurations
+        )
+
+        benchmarks_copy = deepcopy(benchmarks)
+        for benchmark in benchmarks.values():
+            benchmarks_copy = benchmark.check_requirements(benchmarks_copy)
+        self.benchmarks = benchmarks_copy
+
+    def run_experiment(self) -> None:
+        """Run the Benchmark experiment, and store the configuration, raw data, results and figures."""
+
+        for name, benchmark in self.benchmarks.items():
+            print("\nNow executing " + name)
+            # Create the directory for results
+            results_dir = f"Outputs/{self.device_id}/{self.timestamp}/{name}/"
+            Path(results_dir).mkdir(parents=True, exist_ok=True)
+
+            # Execute the current benchmark
+            benchmark.generate_requirements(self.benchmarks)
+            benchmark.execute_full_benchmark()
+
+            # Create configuration JSON file
+            with open(
+                f"{results_dir}{self.device_id}_{self.timestamp}_{name}_configuration.json",
+                "w",
+                encoding="utf-8",
+            ) as f_configuration:
+                dump(benchmark.serializable_configuration.model_dump(), f_configuration)
+
+            # Create untranspiled circuit files
+            if benchmark.untranspiled_circuits:
+                for key_qubits in benchmark.untranspiled_circuits.keys():
+                    for key_depth in benchmark.untranspiled_circuits[key_qubits].keys():
+                        with open(
+                            f"{results_dir}{self.device_id}_{self.timestamp}_{name}_qubits_{key_qubits}_depth_{key_depth}_untranspiled.pkl",
+                            "wb",
+                        ) as f_circuits:
+                            pickle.dump(
+                                benchmark.untranspiled_circuits[key_qubits][key_depth],
+                                f_circuits,
+                            )
+
+            # Create transpiled circuit files
+            for key_qubits in benchmark.transpiled_circuits.keys():
+                for key_depth in benchmark.transpiled_circuits[key_qubits].keys():
+                    with open(
+                        f"{results_dir}{self.device_id}_{self.timestamp}_{name}_qubits_{key_qubits}_depth_{key_depth}_transpiled.pkl",
+                        "wb",
+                    ) as f_circuits:
+                        pickle.dump(
+                            benchmark.transpiled_circuits[key_qubits][key_depth],
+                            f_circuits,
+                        )
+
+            # Create raw result pickle files
+            with open(
+                f"{results_dir}{self.device_id}_{self.timestamp}_{name}_raw_results.pkl",
+                "wb",
+            ) as f_raw_results:
+                pickle.dump(
+                    benchmark.raw_results,
+                    f_raw_results,
+                )
+
+            # Create raw data JSON file
+            with open(
+                f"{results_dir}{self.device_id}_{self.timestamp}_{name}_raw_data.json",
+                "w",
+                encoding="utf-8",
+            ) as f_raw_data:
+                dump(benchmark.raw_data, f_raw_data)
+
+            # Create job metadata JSON file
+            with open(
+                f"{results_dir}{self.device_id}_{self.timestamp}_{name}_job_metadata.json",
+                "w",
+                encoding="utf-8",
+            ) as f_job_metadata:
+                dump(benchmark.job_meta, f_job_metadata)
+
+            # Create results JSON file
+            with open(
+                f"{results_dir}{self.device_id}_{self.timestamp}_{name}_results.json",
+                "w",
+                encoding="utf-8",
+            ) as f_results:
+                dump(benchmark.results, f_results)
+
+            # Create figures
+            Path(f"{results_dir}figures/").mkdir(parents=True, exist_ok=True)
+            for fig_name, fig in benchmark.figures.items():
+                fig.savefig(
+                    f"{results_dir}figures/{self.device_id}_{self.timestamp}_{name}_{fig_name}",
+                    dpi=250,
+                    bbox_inches="tight",
+                )
+
+            # Save benchmark
+            self.benchmarks[benchmark.name()] = benchmark
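Putting the pieces together, `run_experiment` executes each configured benchmark and writes its configuration, raw data, results, and figures under `Outputs/<device>/<timestamp>/<benchmark>/`. A hedged end-to-end sketch, reusing the illustrative `NoOpBenchmark` from the earlier configuration example ('iqmfakeadonis' is the backend name suggested by the `ValueError` hint above; whether it resolves depends on the installed iqm-qiskit packages):

```python
from iqm.benchmarks.benchmark import BenchmarkBase, BenchmarkConfigurationBase
from iqm.benchmarks.benchmark_experiment import BenchmarkExperiment


class NoOpBenchmark(BenchmarkBase):
    """Illustrative benchmark; not part of iqm-benchmarks."""

    @classmethod
    def name(cls):
        return "noop_benchmark"

    def execute_full_benchmark(self):
        self.results = {"status": "ok"}


config = BenchmarkConfigurationBase(benchmark=NoOpBenchmark)
experiment = BenchmarkExperiment("iqmfakeadonis", [config])
experiment.run_experiment()
# Results land under Outputs/iqmfakeadonis/<timestamp>/noop_benchmark/
```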
@@ -0,0 +1,20 @@
+# Copyright 2024 IQM Benchmarks developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Compressive GST is a self-consistent characterization method that constructs the process matrices for a set of gates,
+as well as full parametrizations of an initial state and a POVM.
+Low-rank compression of the process matrices is used to reduce measurement and post-processing overhead.
+"""
+
+from . import compressive_gst
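The low-rank idea here is the generic one: a channel of Kraus rank r is stored as r Kraus operators (r·d² complex parameters) rather than as the full d²×d² process matrix, which is reconstructed only when needed. A standalone numpy illustration of that storage trade-off (this is the textbook construction, not the mGST/compressive_gst API):

```python
import numpy as np

d, r = 2, 1  # Hilbert-space dimension (one qubit), Kraus rank
# Rank-1 example: a unitary gate (here X) has a single Kraus operator.
kraus = [np.array([[0, 1], [1, 0]], dtype=complex)]

# Full process (superoperator) matrix: sum_i K_i (x) conj(K_i).
# The exact Kronecker ordering depends on the row- vs column-stacking convention.
superop = sum(np.kron(K, K.conj()) for K in kraus)

assert superop.shape == (d**2, d**2)
# Compressed storage: r * d**2 = 4 complex numbers vs d**4 = 16 for the full matrix.
```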