iqm-benchmarks 1.3__tar.gz → 1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of iqm-benchmarks might be problematic; see the registry's advisory page for more details.

Files changed (73)
  1. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/CHANGELOG.rst +20 -0
  2. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/PKG-INFO +4 -4
  3. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/pyproject.toml +3 -3
  4. iqm_benchmarks-1.5/src/iqm/benchmarks/__init__.py +55 -0
  5. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/benchmark_definition.py +51 -12
  6. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/entanglement/ghz.py +38 -14
  7. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/quantum_volume/clops.py +19 -8
  8. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/quantum_volume/quantum_volume.py +63 -37
  9. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +31 -13
  10. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +37 -18
  11. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +21 -14
  12. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +7 -6
  13. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm_benchmarks.egg-info/PKG-INFO +4 -4
  14. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm_benchmarks.egg-info/SOURCES.txt +5 -5
  15. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm_benchmarks.egg-info/requires.txt +3 -3
  16. iqm_benchmarks-1.3/src/iqm/benchmarks/__init__.py +0 -31
  17. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/.github/workflows/main.yml +0 -0
  18. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/.github/workflows/publish.yml +0 -0
  19. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/.github/workflows/tag_and_release.yml +0 -0
  20. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/.gitignore +0 -0
  21. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/LICENSE +0 -0
  22. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/README.md +0 -0
  23. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/benchmark_runner.py +0 -0
  24. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_clifford_rb.ipynb +0 -0
  25. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_clops.ipynb +0 -0
  26. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_experiment_all.ipynb +0 -0
  27. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_gst.ipynb +0 -0
  28. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_interleaved_rb.ipynb +0 -0
  29. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_mirror_rb.ipynb +0 -0
  30. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_qscore.ipynb +0 -0
  31. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/example_quantum_volume.ipynb +0 -0
  32. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/generate_2qubit_cliffords.ipynb +0 -0
  33. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/examples/how_to_make_your_own_benchmark.ipynb +0 -0
  34. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/scheduled_experiments/adonis/__init__.py +0 -0
  35. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/scheduled_experiments/adonis/weekly.py +0 -0
  36. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/setup.cfg +0 -0
  37. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/benchmark.py +0 -0
  38. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/benchmark_experiment.py +0 -0
  39. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/compressive_gst/__init__.py +0 -0
  40. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/compressive_gst/compressive_gst.py +0 -0
  41. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/entanglement/__init__.py +0 -0
  42. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/logging_config.py +0 -0
  43. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/optimization/__init__.py +0 -0
  44. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/optimization/qscore.py +0 -0
  45. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/quantum_volume/__init__.py +0 -0
  46. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/__init__.py +0 -0
  47. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
  48. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
  49. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +0 -0
  50. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +0 -0
  51. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +0 -0
  52. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +0 -0
  53. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/readout_mitigation.py +0 -0
  54. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm/benchmarks/utils.py +0 -0
  55. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm_benchmarks.egg-info/dependency_links.txt +0 -0
  56. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/iqm_benchmarks.egg-info/top_level.txt +0 -0
  57. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/LICENSE +0 -0
  58. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/README.md +0 -0
  59. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/additional_fns.py +0 -0
  60. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/algorithm.py +0 -0
  61. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/compatibility.py +0 -0
  62. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/low_level_jit.py +0 -0
  63. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/optimization.py +0 -0
  64. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/qiskit_interface.py +0 -0
  65. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/reporting/figure_gen.py +0 -0
  66. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/src/mGST/reporting/reporting.py +0 -0
  67. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/tag-from-pipeline.sh +0 -0
  68. /iqm_benchmarks-1.3/tests/ghz_test.py → /iqm_benchmarks-1.5/tests/test_ghz.py +0 -0
  69. /iqm_benchmarks-1.3/tests/gst_test.py → /iqm_benchmarks-1.5/tests/test_gst.py +0 -0
  70. /iqm_benchmarks-1.3/tests/qscore_test.py → /iqm_benchmarks-1.5/tests/test_qscore.py +0 -0
  71. /iqm_benchmarks-1.3/tests/qv_test.py → /iqm_benchmarks-1.5/tests/test_qv.py +0 -0
  72. /iqm_benchmarks-1.3/tests/rb_test.py → /iqm_benchmarks-1.5/tests/test_rb.py +0 -0
  73. {iqm_benchmarks-1.3 → iqm_benchmarks-1.5}/tox.ini +0 -0
@@ -2,6 +2,26 @@
2
2
  Changelog
3
3
  =========
4
4
 
5
+ Version 1.5
6
+ ===========
7
+ * fit results are no longer `BenchmarkObservation`, and instead are moved into the datasets.
8
+
9
+ Version 1.4
10
+ ===========
11
+
12
+ * Renames:
13
+
14
+ * AnalysisResult -> BenchmarkAnalysisResult
15
+ * RunResult -> BenchmarkRunResult
16
+
17
+ * Adds BenchmarkObservation class, and modifies BenchmarkAnalysisResult so observations now accepts a list[BenchmarkObservation].
18
+ * Adds BenchmarkObservationIdentifier class.
19
+ * Rebases RandomizedBenchmarking benchmarks, QuantumVolume, GHZ and CLOPS to use the new Observation class.
20
+ * Fixes serialization of some circuits.
21
+ * Adds AVAILABLE_BENCHMARKS to map a benchmark name to its class in __init__.
22
+ * Adds benchmarks and configurations to __init__ for public import.
23
+ * Other fixes.
24
+
5
25
  Version 1.3
6
26
  ===========
7
27
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: iqm-benchmarks
3
- Version: 1.3
3
+ Version: 1.5
4
4
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
5
5
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
6
6
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -15,14 +15,14 @@ Requires-Dist: lmfit==1.3.1
15
15
  Requires-Dist: matplotlib==3.9.0
16
16
  Requires-Dist: more-itertools==10.1.0
17
17
  Requires-Dist: mthree==2.6.3
18
- Requires-Dist: networkx==2.8.8
19
- Requires-Dist: numpy>=1.24.2
18
+ Requires-Dist: networkx==3.3
19
+ Requires-Dist: numpy>=1.25.2
20
20
  Requires-Dist: qiskit==0.45.3
21
21
  Requires-Dist: qiskit-aer==0.13.3
22
22
  Requires-Dist: qiskit-iqm==15.1
23
23
  Requires-Dist: scikit-optimize==0.10.2
24
24
  Requires-Dist: tabulate==0.9.0
25
- Requires-Dist: uncertainties==3.2.1
25
+ Requires-Dist: uncertainties==3.2.2
26
26
  Requires-Dist: pycurl==7.45.3
27
27
  Requires-Dist: xarray==2024.6.0
28
28
  Requires-Dist: types-pycurl
@@ -31,14 +31,14 @@ dependencies = [
31
31
  "matplotlib == 3.9.0",
32
32
  "more-itertools == 10.1.0",
33
33
  "mthree == 2.6.3",
34
- "networkx==2.8.8",
35
- "numpy >= 1.24.2",
34
+ "networkx==3.3",
35
+ "numpy >= 1.25.2",
36
36
  "qiskit == 0.45.3",
37
37
  "qiskit-aer == 0.13.3",
38
38
  "qiskit-iqm == 15.1",
39
39
  "scikit-optimize == 0.10.2",
40
40
  "tabulate == 0.9.0",
41
- "uncertainties == 3.2.1",
41
+ "uncertainties == 3.2.2",
42
42
  "pycurl == 7.45.3",
43
43
  "xarray == 2024.6.0",
44
44
  "types-pycurl",
@@ -0,0 +1,55 @@
1
+ # Copyright 2024 IQM Benchmarks developers
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ IQM's Python Library Benchmarking Suite QCVV.
17
+ """
18
+
19
+ from importlib.metadata import PackageNotFoundError, version
20
+
21
+ from .benchmark_definition import (
22
+ Benchmark,
23
+ BenchmarkAnalysisResult,
24
+ BenchmarkObservation,
25
+ BenchmarkObservationIdentifier,
26
+ BenchmarkRunResult,
27
+ )
28
+ from .entanglement.ghz import GHZBenchmark, GHZConfiguration
29
+ from .quantum_volume.clops import CLOPSBenchmark, CLOPSConfiguration
30
+ from .quantum_volume.quantum_volume import QuantumVolumeBenchmark, QuantumVolumeConfiguration
31
+ from .randomized_benchmarking.clifford_rb.clifford_rb import CliffordRandomizedBenchmarking, CliffordRBConfiguration
32
+ from .randomized_benchmarking.interleaved_rb.interleaved_rb import (
33
+ InterleavedRandomizedBenchmarking,
34
+ InterleavedRBConfiguration,
35
+ )
36
+ from .randomized_benchmarking.mirror_rb.mirror_rb import MirrorRandomizedBenchmarking, MirrorRBConfiguration
37
+
38
+
39
+ AVAILABLE_BENCHMARKS = {
40
+ GHZBenchmark.name: GHZBenchmark,
41
+ CLOPSBenchmark.name: CLOPSBenchmark,
42
+ QuantumVolumeBenchmark.name: QuantumVolumeBenchmark,
43
+ CliffordRandomizedBenchmarking.name: CliffordRandomizedBenchmarking,
44
+ InterleavedRandomizedBenchmarking.name: InterleavedRandomizedBenchmarking,
45
+ MirrorRandomizedBenchmarking.name: MirrorRandomizedBenchmarking,
46
+ }
47
+
48
+ try:
49
+ # Change here if project is renamed and does not equal the package name
50
+ dist_name = "iqm-benchmarks"
51
+ __version__ = version(dist_name)
52
+ except PackageNotFoundError: # pragma: no cover
53
+ __version__ = "unknown"
54
+ finally:
55
+ del version, PackageNotFoundError
@@ -21,7 +21,7 @@ import copy
21
21
  from copy import deepcopy
22
22
  from dataclasses import dataclass, field
23
23
  import functools
24
- from typing import Any, Dict, List, Union
24
+ from typing import Any, Dict, List, Optional, Union
25
25
  import uuid
26
26
 
27
27
  from matplotlib.figure import Figure
@@ -35,7 +35,44 @@ from iqm.qiskit_iqm.iqm_provider import IQMBackend, IQMFacadeBackend
35
35
 
36
36
 
37
37
  @dataclass
38
- class RunResult:
38
+ class BenchmarkObservationIdentifier:
39
+ """Identifier for observations for ease of use
40
+
41
+ Attributes:
42
+ qubit_indices: list containing the indices of the qubits the observation was executed on.
43
+ """
44
+
45
+ qubit_indices: list[int]
46
+
47
+ @property
48
+ def string_identifier(self) -> str:
49
+ """String version of the qubit indices for ease of use
50
+
51
+ Returns:
52
+ A string of the qubit indices
53
+ """
54
+ return str(self.qubit_indices)
55
+
56
+
57
+ @dataclass
58
+ class BenchmarkObservation:
59
+ """Dataclass to store the main results of a single run of a Benchmark
60
+
61
+ Attributes:
62
+ name: name of the observation
63
+ value: value of the observation
64
+ identifier: identifier, which should be a string of the qubit layout
65
+ uncertainty: uncertainty of the observation
66
+ """
67
+
68
+ name: str
69
+ value: Any
70
+ identifier: BenchmarkObservationIdentifier
71
+ uncertainty: Optional[Any] = None
72
+
73
+
74
+ @dataclass
75
+ class BenchmarkRunResult:
39
76
  """
40
77
  A dataclass that stores the results of a single run of a Benchmark.
41
78
 
@@ -47,7 +84,7 @@ class RunResult:
47
84
 
48
85
 
49
86
  @dataclass
50
- class AnalysisResult:
87
+ class BenchmarkAnalysisResult:
51
88
  """
52
89
  A dataclass storing the results of the analysis.
53
90
 
@@ -58,7 +95,7 @@ class AnalysisResult:
58
95
 
59
96
  dataset: xr.Dataset
60
97
  plots: dict[str, Figure] = field(default_factory=lambda: ({}))
61
- observations: dict[str, Any] = field(default_factory=lambda: ({}))
98
+ observations: list[BenchmarkObservation] = field(default_factory=lambda: [])
62
99
 
63
100
  def plot(self, plot_name: str):
64
101
  """
@@ -78,7 +115,7 @@ class AnalysisResult:
78
115
  plt.show()
79
116
 
80
117
  @classmethod
81
- def from_run_result(cls, run: RunResult):
118
+ def from_run_result(cls, run: BenchmarkRunResult):
82
119
  """
83
120
  Creates a new ``AnalysisResult`` from a ``RunResult``.
84
121
 
@@ -88,7 +125,7 @@ class AnalysisResult:
88
125
  return cls(dataset=run.dataset)
89
126
 
90
127
 
91
- def default_analysis_function(result: AnalysisResult) -> AnalysisResult:
128
+ def default_analysis_function(result: BenchmarkAnalysisResult) -> BenchmarkAnalysisResult:
92
129
  """
93
130
  The default analysis that only pass the result through.
94
131
  """
@@ -201,7 +238,7 @@ class Benchmark(ABC):
201
238
  # From exa_support MR
202
239
  self.options = copy.copy(self.default_options) if self.default_options else {}
203
240
  self.options.update(kwargs)
204
- self.runs: list[RunResult] = []
241
+ self.runs: list[BenchmarkRunResult] = []
205
242
 
206
243
  @classmethod
207
244
  @abstractmethod
@@ -224,7 +261,7 @@ class Benchmark(ABC):
224
261
  the benchmark results.
225
262
  """
226
263
 
227
- def run(self, calibration_set_id: str | uuid.UUID | None = None) -> RunResult:
264
+ def run(self, calibration_set_id: str | uuid.UUID | None = None) -> BenchmarkRunResult:
228
265
  """
229
266
  Runs the benchmark using the given backend and calibration_set_id.
230
267
 
@@ -236,13 +273,15 @@ class Benchmark(ABC):
236
273
  RunResult: The result of the benchmark run.
237
274
  """
238
275
  backend_for_execute = copy.copy(self.backend)
239
- backend_for_execute.run = functools.partial(self.backend.run, calibration_set_id=calibration_set_id) # type: ignore
276
+ backend_for_execute.run = functools.partial(
277
+ self.backend.run, calibration_set_id=calibration_set_id
278
+ ) # type: ignore
240
279
  dataset = self.execute(backend_for_execute)
241
- run = RunResult(dataset)
280
+ run = BenchmarkRunResult(dataset)
242
281
  self.runs.append(run)
243
282
  return run
244
283
 
245
- def analyze(self, run_index=-1) -> AnalysisResult:
284
+ def analyze(self, run_index=-1) -> BenchmarkAnalysisResult:
246
285
  """
247
286
  The default analysis for the benchmark.
248
287
 
@@ -259,6 +298,6 @@ class Benchmark(ABC):
259
298
  the ``analysis_function`` field.
260
299
  """
261
300
  run = self.runs[run_index]
262
- result = AnalysisResult.from_run_result(run)
301
+ result = BenchmarkAnalysisResult.from_run_result(run)
263
302
  updated_result = self.analysis_function(result)
264
303
  return updated_result
@@ -31,9 +31,15 @@ from qiskit_aer import Aer
31
31
  from scipy.spatial.distance import hamming
32
32
  import xarray as xr
33
33
 
34
- from iqm.benchmarks import Benchmark
35
34
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
36
- from iqm.benchmarks.benchmark_definition import AnalysisResult, RunResult, add_counts_to_dataset
35
+ from iqm.benchmarks.benchmark_definition import (
36
+ Benchmark,
37
+ BenchmarkAnalysisResult,
38
+ BenchmarkObservation,
39
+ BenchmarkObservationIdentifier,
40
+ BenchmarkRunResult,
41
+ add_counts_to_dataset,
42
+ )
37
43
  from iqm.benchmarks.logging_config import qcvv_logger
38
44
  from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
39
45
  from iqm.benchmarks.utils import (
@@ -231,7 +237,7 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> Lis
231
237
  return [fidelity]
232
238
 
233
239
 
234
- def fidelity_analysis(run: RunResult) -> AnalysisResult:
240
+ def fidelity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
235
241
  """Analyze counts and compute the state fidelity
236
242
 
237
243
  Args:
@@ -242,12 +248,12 @@ def fidelity_analysis(run: RunResult) -> AnalysisResult:
242
248
  AnalysisResult
243
249
  An object containing the dataset, plots, and observations
244
250
  """
245
- observations = {}
246
251
  dataset = run.dataset
247
252
  routine = dataset.attrs["fidelity_routine"]
248
253
  qubit_layouts = dataset.attrs["custom_qubits_array"]
249
254
  backend_name = dataset.attrs["backend_name"]
250
255
 
256
+ observation_list: list[BenchmarkObservation] = []
251
257
  for qubit_layout in qubit_layouts:
252
258
  if routine == "randomized_measurements":
253
259
  ideal_simulator = Aer.get_backend("statevector_simulator")
@@ -261,16 +267,33 @@ def fidelity_analysis(run: RunResult) -> AnalysisResult:
261
267
  ideal_probabilities.append(
262
268
  dict(sorted(ideal_simulator.run(deflated_qc).result().get_counts().items()))
263
269
  )
264
- result_dict = fidelity_ghz_randomized_measurements(
265
- dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
270
+ observation_list.extend(
271
+ [
272
+ BenchmarkObservation(
273
+ name=key, identifier=BenchmarkObservationIdentifier(qubit_layout), value=value
274
+ )
275
+ for key, value in fidelity_ghz_randomized_measurements(
276
+ dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
277
+ ).items()
278
+ ]
266
279
  )
267
280
  else: # default routine == "coherences":
268
281
  fidelity = fidelity_ghz_coherences(dataset, qubit_layout)
269
- result_dict = {"fidelity": fidelity[0]}
282
+ observation_list.extend(
283
+ [
284
+ BenchmarkObservation(
285
+ name="fidelity", identifier=BenchmarkObservationIdentifier(qubit_layout), value=fidelity[0]
286
+ )
287
+ ]
288
+ )
270
289
  if len(fidelity) > 1:
271
- result_dict.update({"fidelity_rem": fidelity[1]})
272
- observations[str(qubit_layout)] = result_dict
273
- return AnalysisResult(dataset=dataset, observations=observations)
290
+
291
+ observation_list.append(
292
+ BenchmarkObservation(
293
+ name="fidelity_rem", identifier=BenchmarkObservationIdentifier(qubit_layout), value=fidelity[1]
294
+ )
295
+ )
296
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observation_list)
274
297
 
275
298
 
276
299
  def generate_ghz_linear(num_qubits: int) -> QuantumCircuit:
@@ -506,6 +529,7 @@ class GHZBenchmark(Benchmark):
506
529
  """The GHZ Benchmark estimates the quality of generated Greenberger-Horne-Zeilinger states"""
507
530
 
508
531
  analysis_function = staticmethod(fidelity_analysis)
532
+ name = "ghz"
509
533
 
510
534
  def __init__(self, backend: IQMBackendBase, configuration: "GHZConfiguration"):
511
535
  """Construct the GHZBenchmark class.
@@ -541,9 +565,9 @@ class GHZBenchmark(Benchmark):
541
565
 
542
566
  self.timestamp = strftime("%Y%m%d-%H%M%S")
543
567
 
544
- @staticmethod
545
- def name() -> str:
546
- return "ghz"
568
+ # @staticmethod
569
+ # def name() -> str:
570
+ # return "ghz"
547
571
 
548
572
  def generate_native_ghz(self, qubit_layout: List[int], qubit_count: int, routine: str) -> QuantumCircuit:
549
573
  """
@@ -704,7 +728,7 @@ class GHZBenchmark(Benchmark):
704
728
 
705
729
  for key, value in self.configuration:
706
730
  if key == "benchmark": # Avoid saving the class object
707
- dataset.attrs[key] = value.name()
731
+ dataset.attrs[key] = value.name
708
732
  else:
709
733
  dataset.attrs[key] = value
710
734
  dataset.attrs[f"backend_name"] = self.backend.name
@@ -31,7 +31,7 @@ import xarray as xr
31
31
 
32
32
  from iqm.benchmarks import Benchmark
33
33
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
34
- from iqm.benchmarks.benchmark_definition import AnalysisResult, RunResult
34
+ from iqm.benchmarks.benchmark_definition import BenchmarkAnalysisResult, BenchmarkRunResult
35
35
  from iqm.benchmarks.logging_config import qcvv_logger
36
36
  from iqm.benchmarks.utils import (
37
37
  count_2q_layers,
@@ -222,7 +222,7 @@ def retrieve_clops_elapsed_times(job_meta: Dict[str, Dict[str, Any]]) -> Dict[st
222
222
  return overall_elapsed
223
223
 
224
224
 
225
- def clops_analysis(run: RunResult) -> AnalysisResult:
225
+ def clops_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
226
226
  """Analysis function for a CLOPS (v or h) experiment
227
227
 
228
228
  Args:
@@ -252,7 +252,8 @@ def clops_analysis(run: RunResult) -> AnalysisResult:
252
252
 
253
253
  transpiled_qc_list = []
254
254
  for _, value in dataset.attrs["transpiled_circuits"].items():
255
- transpiled_qc_list.extend(value)
255
+ for _, transpiled_circuit in value.items():
256
+ transpiled_qc_list.extend(transpiled_circuit)
256
257
 
257
258
  # CLOPS_V
258
259
  clops_v: float = num_circuits * num_updates * num_shots * depth / clops_time
@@ -310,7 +311,7 @@ def clops_analysis(run: RunResult) -> AnalysisResult:
310
311
  # Sort the final dataset
311
312
  dataset.attrs = dict(sorted(dataset.attrs.items()))
312
313
 
313
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
314
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
314
315
 
315
316
 
316
317
  class CLOPSBenchmark(Benchmark):
@@ -396,8 +397,15 @@ class CLOPSBenchmark(Benchmark):
396
397
 
397
398
  """
398
399
  qcvv_logger.info(f"Adding all circuits to the dataset")
399
- dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
400
- dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
400
+ for key, circuit in zip(
401
+ ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
402
+ ):
403
+ dictionary = {}
404
+ for outer_key, outer_value in circuit.items():
405
+ dictionary[str(outer_key)] = {
406
+ str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
407
+ }
408
+ dataset.attrs[key] = dictionary
401
409
 
402
410
  def append_parameterized_unitary(
403
411
  self,
@@ -614,8 +622,11 @@ class CLOPSBenchmark(Benchmark):
614
622
  # Sort circuits according to their final measurement mappings
615
623
  (sorted_transpiled_qc_list, _), self.time_sort_batches = sort_batches_by_final_layout(transpiled_qc_list)
616
624
 
617
- self.untranspiled_circuits.update({tuple(self.qubits): qc_list})
618
- self.transpiled_circuits.update(sorted_transpiled_qc_list)
625
+ self.untranspiled_circuits.update({str(self.qubits): {str(self.qubits): qc_list}})
626
+ self.transpiled_circuits.update(
627
+ {str(self.qubits): {str(key): value for key, value in sorted_transpiled_qc_list.items()}}
628
+ )
629
+ # self.transpiled_circuits[str(self.qubits)].update(sorted_transpiled_qc_list)
619
630
 
620
631
  return sorted_transpiled_qc_list
621
632
 
@@ -33,9 +33,15 @@ import xarray as xr
33
33
  # import iqm.diqe.executors.dynamical_decoupling.dd_high_level as dd
34
34
  # from iqm.diqe.executors.dynamical_decoupling.dynamical_decoupling_core import DDStrategy
35
35
  # from iqm.diqe.mapomatic import evaluate_costs, get_calibration_fidelities, get_circuit, matching_layouts
36
- from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
37
36
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
38
- from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
37
+ from iqm.benchmarks.benchmark_definition import (
38
+ Benchmark,
39
+ BenchmarkAnalysisResult,
40
+ BenchmarkObservation,
41
+ BenchmarkObservationIdentifier,
42
+ BenchmarkRunResult,
43
+ add_counts_to_dataset,
44
+ )
39
45
  from iqm.benchmarks.logging_config import qcvv_logger
40
46
  from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
41
47
  from iqm.benchmarks.utils import ( # execute_with_dd,
@@ -281,7 +287,7 @@ def plot_hop_threshold(
281
287
  return fig_name, fig
282
288
 
283
289
 
284
- def qv_analysis(run: RunResult) -> AnalysisResult:
290
+ def qv_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
285
291
  """Analysis function for a Quantum Volume experiment
286
292
 
287
293
  Args:
@@ -291,8 +297,8 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
291
297
  """
292
298
 
293
299
  plots = {}
294
- observations = {}
295
- dataset = run.dataset
300
+ observations: list[BenchmarkObservation] = []
301
+ dataset = run.dataset.copy(deep=True)
296
302
  backend_name = dataset.attrs["backend_name"]
297
303
  execution_timestamp = dataset.attrs["execution_timestamp"]
298
304
  num_circuits = dataset.attrs["num_circuits"]
@@ -333,17 +339,24 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
333
339
  # Compute the HO probabilities
334
340
  qv_result = compute_heavy_output_probabilities(execution_results[str(qubits)], ideal_heavy_outputs[str(qubits)])
335
341
 
336
- processed_results = {
337
- "average_heavy_output_probability": {
338
- "value": cumulative_hop(qv_result)[-1],
339
- "uncertainty": cumulative_std(qv_result)[-1],
340
- },
341
- "is_successful": {"value": str(is_successful(qv_result, num_sigmas)), "uncertainty": np.NaN},
342
- "QV_result": {
343
- "value": 2 ** len(qubits) if is_successful(qv_result, num_sigmas) else 1,
344
- "uncertainty": np.NaN,
345
- },
346
- }
342
+ observations = [
343
+ BenchmarkObservation(
344
+ name="average_heavy_output_probability",
345
+ value=cumulative_hop(qv_result)[-1],
346
+ uncertainty=cumulative_std(qv_result)[-1],
347
+ identifier=BenchmarkObservationIdentifier(qubits),
348
+ ),
349
+ BenchmarkObservation(
350
+ name="is_succesful",
351
+ value=is_successful(qv_result, num_sigmas),
352
+ identifier=BenchmarkObservationIdentifier(qubits),
353
+ ),
354
+ BenchmarkObservation(
355
+ name="QV_result",
356
+ value=2 ** len(qubits) if is_successful(qv_result) else 1,
357
+ identifier=BenchmarkObservationIdentifier(qubits),
358
+ ),
359
+ ]
347
360
 
348
361
  dataset.attrs[qubits_idx].update(
349
362
  {
@@ -354,9 +367,6 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
354
367
  }
355
368
  )
356
369
 
357
- # UPDATE OBSERVATIONS
358
- observations.update({qubits_idx: processed_results})
359
-
360
370
  fig_name, fig = plot_hop_threshold(
361
371
  qubits,
362
372
  depth[str(qubits)],
@@ -370,7 +380,7 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
370
380
  plots[fig_name] = fig
371
381
 
372
382
  if not rem:
373
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
383
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
374
384
 
375
385
  # When REM is set to True, do the post-processing with the adjusted quasi-probabilities
376
386
  mit_shots = dataset.attrs["mit_shots"]
@@ -388,18 +398,6 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
388
398
  ideal_heavy_outputs[str(qubits)],
389
399
  )
390
400
 
391
- rem_results = {
392
- "REM_average_heavy_output_probability": {
393
- "value": cumulative_hop(qv_result_rem)[-1],
394
- "uncertainty": cumulative_std(qv_result_rem)[-1],
395
- },
396
- "REM_is_successful": {"value": str(is_successful(qv_result_rem)), "uncertainty": np.NaN},
397
- "REM_QV_result": {
398
- "value": 2 ** len(qubits) if is_successful(qv_result_rem, num_sigmas) else 1,
399
- "uncertainty": np.NaN,
400
- },
401
- }
402
-
403
401
  dataset.attrs[qubits_idx].update(
404
402
  {
405
403
  "sorted_qc_list_indices": (sorted_qc_list_indices if physical_layout == "batching" else None),
@@ -410,7 +408,26 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
410
408
  )
411
409
 
412
410
  # UPDATE OBSERVATIONS
413
- observations.update({qubits_idx: rem_results})
411
+ observations.extend(
412
+ [
413
+ BenchmarkObservation(
414
+ name="REM_average_heavy_output_probability",
415
+ value=cumulative_hop(qv_result_rem)[-1],
416
+ uncertainty=cumulative_std(qv_result_rem)[-1],
417
+ identifier=BenchmarkObservationIdentifier(qubits),
418
+ ),
419
+ BenchmarkObservation(
420
+ name="REM_is_succesful",
421
+ value=is_successful(qv_result_rem, num_sigmas),
422
+ identifier=BenchmarkObservationIdentifier(qubits),
423
+ ),
424
+ BenchmarkObservation(
425
+ name="REM_QV_result",
426
+ value=2 ** len(qubits) if is_successful(qv_result_rem) else 1,
427
+ identifier=BenchmarkObservationIdentifier(qubits),
428
+ ),
429
+ ]
430
+ )
414
431
 
415
432
  fig_name_rem, fig_rem = plot_hop_threshold(
416
433
  qubits,
@@ -424,7 +441,7 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
424
441
  )
425
442
  plots[fig_name_rem] = fig_rem
426
443
 
427
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
444
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
428
445
 
429
446
 
430
447
  class QuantumVolumeBenchmark(Benchmark):
@@ -509,8 +526,15 @@ class QuantumVolumeBenchmark(Benchmark):
509
526
 
510
527
  """
511
528
  qcvv_logger.info(f"Adding all circuits to the dataset")
512
- dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
513
- dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
529
+ for key, circuit in zip(
530
+ ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
531
+ ):
532
+ dictionary = {}
533
+ for outer_key, outer_value in circuit.items():
534
+ dictionary[str(outer_key)] = {
535
+ str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
536
+ }
537
+ dataset.attrs[key] = dictionary
514
538
 
515
539
  # def get_mapomatic_average_qv_scores(self) -> List[List[int]]:
516
540
  # """Estimate the average mapomatic scores for N quantum volume circuit samples
@@ -786,7 +810,9 @@ class QuantumVolumeBenchmark(Benchmark):
786
810
  "time_submit": job_dict["time_submit"],
787
811
  "time_retrieve": time_retrieve,
788
812
  "all_job_metadata": all_job_metadata,
789
- "sorted_qc_list_indices": sorted_qc_list_indices[str(qubits)],
813
+ "sorted_qc_list_indices": {
814
+ str(key): value for key, value in sorted_qc_list_indices[str(qubits)].items()
815
+ },
790
816
  "operation_counts": all_op_counts[str(qubits)],
791
817
  }
792
818
  }