iqm-benchmarks 1.3__py3-none-any.whl → 1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic.


This version of iqm-benchmarks might be problematic; consult the package registry's advisory page for more details.

@@ -18,9 +18,33 @@ IQM's Python Library Benchmarking Suite QCVV.
18
18
 
19
19
  from importlib.metadata import PackageNotFoundError, version
20
20
 
21
- from .benchmark_definition import AnalysisResult, Benchmark, RunResult
21
+ from .benchmark_definition import (
22
+ Benchmark,
23
+ BenchmarkAnalysisResult,
24
+ BenchmarkObservation,
25
+ BenchmarkObservationIdentifier,
26
+ BenchmarkRunResult,
27
+ )
28
+ from .entanglement.ghz import GHZBenchmark, GHZConfiguration
29
+ from .quantum_volume.clops import CLOPSBenchmark, CLOPSConfiguration
30
+ from .quantum_volume.quantum_volume import QuantumVolumeBenchmark, QuantumVolumeConfiguration
31
+ from .randomized_benchmarking.clifford_rb.clifford_rb import CliffordRandomizedBenchmarking, CliffordRBConfiguration
32
+ from .randomized_benchmarking.interleaved_rb.interleaved_rb import (
33
+ InterleavedRandomizedBenchmarking,
34
+ InterleavedRBConfiguration,
35
+ )
36
+ from .randomized_benchmarking.mirror_rb.mirror_rb import MirrorRandomizedBenchmarking, MirrorRBConfiguration
22
37
 
23
38
 
39
+ AVAILABLE_BENCHMARKS = {
40
+ GHZBenchmark.name: GHZBenchmark,
41
+ CLOPSBenchmark.name: CLOPSBenchmark,
42
+ QuantumVolumeBenchmark.name: QuantumVolumeBenchmark,
43
+ CliffordRandomizedBenchmarking.name: CliffordRandomizedBenchmarking,
44
+ InterleavedRandomizedBenchmarking.name: InterleavedRandomizedBenchmarking,
45
+ MirrorRandomizedBenchmarking.name: MirrorRandomizedBenchmarking,
46
+ }
47
+
24
48
  try:
25
49
  # Change here if project is renamed and does not equal the package name
26
50
  dist_name = "iqm-benchmarks"
@@ -21,7 +21,7 @@ import copy
21
21
  from copy import deepcopy
22
22
  from dataclasses import dataclass, field
23
23
  import functools
24
- from typing import Any, Dict, List, Union
24
+ from typing import Any, Dict, List, Optional, Union
25
25
  import uuid
26
26
 
27
27
  from matplotlib.figure import Figure
@@ -35,7 +35,44 @@ from iqm.qiskit_iqm.iqm_provider import IQMBackend, IQMFacadeBackend
35
35
 
36
36
 
37
37
  @dataclass
38
- class RunResult:
38
+ class BenchmarkObservationIdentifier:
39
+ """Identifier for observations for ease of use
40
+
41
+ Attributes:
42
+ qubit_indices: list containing the indices of the qubits the observation was executed on.
43
+ """
44
+
45
+ qubit_indices: list[int]
46
+
47
+ @property
48
+ def string_identifier(self) -> str:
49
+ """String version of the qubit indices for ease of use
50
+
51
+ Returns:
52
+ A string of the qubit indices
53
+ """
54
+ return str(self.qubit_indices)
55
+
56
+
57
+ @dataclass
58
+ class BenchmarkObservation:
59
+ """Dataclass to store the main results of a single run of a Benchmark
60
+
61
+ Attributes:
62
+ name: name of the observation
63
+ value: value of the observation
64
+ identifier: identifier, which should be a string of the qubit layout
65
+ uncertainty: uncertainty of the observation
66
+ """
67
+
68
+ name: str
69
+ value: Any
70
+ identifier: BenchmarkObservationIdentifier
71
+ uncertainty: Optional[Any] = None
72
+
73
+
74
+ @dataclass
75
+ class BenchmarkRunResult:
39
76
  """
40
77
  A dataclass that stores the results of a single run of a Benchmark.
41
78
 
@@ -47,7 +84,7 @@ class RunResult:
47
84
 
48
85
 
49
86
  @dataclass
50
- class AnalysisResult:
87
+ class BenchmarkAnalysisResult:
51
88
  """
52
89
  A dataclass storing the results of the analysis.
53
90
 
@@ -58,7 +95,7 @@ class AnalysisResult:
58
95
 
59
96
  dataset: xr.Dataset
60
97
  plots: dict[str, Figure] = field(default_factory=lambda: ({}))
61
- observations: dict[str, Any] = field(default_factory=lambda: ({}))
98
+ observations: list[BenchmarkObservation] = field(default_factory=lambda: [])
62
99
 
63
100
  def plot(self, plot_name: str):
64
101
  """
@@ -78,7 +115,7 @@ class AnalysisResult:
78
115
  plt.show()
79
116
 
80
117
  @classmethod
81
- def from_run_result(cls, run: RunResult):
118
+ def from_run_result(cls, run: BenchmarkRunResult):
82
119
  """
83
120
  Creates a new ``AnalysisResult`` from a ``RunResult``.
84
121
 
@@ -88,7 +125,7 @@ class AnalysisResult:
88
125
  return cls(dataset=run.dataset)
89
126
 
90
127
 
91
- def default_analysis_function(result: AnalysisResult) -> AnalysisResult:
128
+ def default_analysis_function(result: BenchmarkAnalysisResult) -> BenchmarkAnalysisResult:
92
129
  """
93
130
  The default analysis that only pass the result through.
94
131
  """
@@ -201,7 +238,7 @@ class Benchmark(ABC):
201
238
  # From exa_support MR
202
239
  self.options = copy.copy(self.default_options) if self.default_options else {}
203
240
  self.options.update(kwargs)
204
- self.runs: list[RunResult] = []
241
+ self.runs: list[BenchmarkRunResult] = []
205
242
 
206
243
  @classmethod
207
244
  @abstractmethod
@@ -224,7 +261,7 @@ class Benchmark(ABC):
224
261
  the benchmark results.
225
262
  """
226
263
 
227
- def run(self, calibration_set_id: str | uuid.UUID | None = None) -> RunResult:
264
+ def run(self, calibration_set_id: str | uuid.UUID | None = None) -> BenchmarkRunResult:
228
265
  """
229
266
  Runs the benchmark using the given backend and calibration_set_id.
230
267
 
@@ -236,13 +273,15 @@ class Benchmark(ABC):
236
273
  RunResult: The result of the benchmark run.
237
274
  """
238
275
  backend_for_execute = copy.copy(self.backend)
239
- backend_for_execute.run = functools.partial(self.backend.run, calibration_set_id=calibration_set_id) # type: ignore
276
+ backend_for_execute.run = functools.partial(
277
+ self.backend.run, calibration_set_id=calibration_set_id
278
+ ) # type: ignore
240
279
  dataset = self.execute(backend_for_execute)
241
- run = RunResult(dataset)
280
+ run = BenchmarkRunResult(dataset)
242
281
  self.runs.append(run)
243
282
  return run
244
283
 
245
- def analyze(self, run_index=-1) -> AnalysisResult:
284
+ def analyze(self, run_index=-1) -> BenchmarkAnalysisResult:
246
285
  """
247
286
  The default analysis for the benchmark.
248
287
 
@@ -259,6 +298,6 @@ class Benchmark(ABC):
259
298
  the ``analysis_function`` field.
260
299
  """
261
300
  run = self.runs[run_index]
262
- result = AnalysisResult.from_run_result(run)
301
+ result = BenchmarkAnalysisResult.from_run_result(run)
263
302
  updated_result = self.analysis_function(result)
264
303
  return updated_result
@@ -31,9 +31,15 @@ from qiskit_aer import Aer
31
31
  from scipy.spatial.distance import hamming
32
32
  import xarray as xr
33
33
 
34
- from iqm.benchmarks import Benchmark
35
34
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
36
- from iqm.benchmarks.benchmark_definition import AnalysisResult, RunResult, add_counts_to_dataset
35
+ from iqm.benchmarks.benchmark_definition import (
36
+ Benchmark,
37
+ BenchmarkAnalysisResult,
38
+ BenchmarkObservation,
39
+ BenchmarkObservationIdentifier,
40
+ BenchmarkRunResult,
41
+ add_counts_to_dataset,
42
+ )
37
43
  from iqm.benchmarks.logging_config import qcvv_logger
38
44
  from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
39
45
  from iqm.benchmarks.utils import (
@@ -231,7 +237,7 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> Lis
231
237
  return [fidelity]
232
238
 
233
239
 
234
- def fidelity_analysis(run: RunResult) -> AnalysisResult:
240
+ def fidelity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
235
241
  """Analyze counts and compute the state fidelity
236
242
 
237
243
  Args:
@@ -242,12 +248,12 @@ def fidelity_analysis(run: RunResult) -> AnalysisResult:
242
248
  AnalysisResult
243
249
  An object containing the dataset, plots, and observations
244
250
  """
245
- observations = {}
246
251
  dataset = run.dataset
247
252
  routine = dataset.attrs["fidelity_routine"]
248
253
  qubit_layouts = dataset.attrs["custom_qubits_array"]
249
254
  backend_name = dataset.attrs["backend_name"]
250
255
 
256
+ observation_list: list[BenchmarkObservation] = []
251
257
  for qubit_layout in qubit_layouts:
252
258
  if routine == "randomized_measurements":
253
259
  ideal_simulator = Aer.get_backend("statevector_simulator")
@@ -261,16 +267,33 @@ def fidelity_analysis(run: RunResult) -> AnalysisResult:
261
267
  ideal_probabilities.append(
262
268
  dict(sorted(ideal_simulator.run(deflated_qc).result().get_counts().items()))
263
269
  )
264
- result_dict = fidelity_ghz_randomized_measurements(
265
- dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
270
+ observation_list.extend(
271
+ [
272
+ BenchmarkObservation(
273
+ name=key, identifier=BenchmarkObservationIdentifier(qubit_layout), value=value
274
+ )
275
+ for key, value in fidelity_ghz_randomized_measurements(
276
+ dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
277
+ ).items()
278
+ ]
266
279
  )
267
280
  else: # default routine == "coherences":
268
281
  fidelity = fidelity_ghz_coherences(dataset, qubit_layout)
269
- result_dict = {"fidelity": fidelity[0]}
282
+ observation_list.extend(
283
+ [
284
+ BenchmarkObservation(
285
+ name="fidelity", identifier=BenchmarkObservationIdentifier(qubit_layout), value=fidelity[0]
286
+ )
287
+ ]
288
+ )
270
289
  if len(fidelity) > 1:
271
- result_dict.update({"fidelity_rem": fidelity[1]})
272
- observations[str(qubit_layout)] = result_dict
273
- return AnalysisResult(dataset=dataset, observations=observations)
290
+
291
+ observation_list.append(
292
+ BenchmarkObservation(
293
+ name="fidelity_rem", identifier=BenchmarkObservationIdentifier(qubit_layout), value=fidelity[1]
294
+ )
295
+ )
296
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observation_list)
274
297
 
275
298
 
276
299
  def generate_ghz_linear(num_qubits: int) -> QuantumCircuit:
@@ -506,6 +529,7 @@ class GHZBenchmark(Benchmark):
506
529
  """The GHZ Benchmark estimates the quality of generated Greenberger-Horne-Zeilinger states"""
507
530
 
508
531
  analysis_function = staticmethod(fidelity_analysis)
532
+ name = "ghz"
509
533
 
510
534
  def __init__(self, backend: IQMBackendBase, configuration: "GHZConfiguration"):
511
535
  """Construct the GHZBenchmark class.
@@ -541,9 +565,9 @@ class GHZBenchmark(Benchmark):
541
565
 
542
566
  self.timestamp = strftime("%Y%m%d-%H%M%S")
543
567
 
544
- @staticmethod
545
- def name() -> str:
546
- return "ghz"
568
+ # @staticmethod
569
+ # def name() -> str:
570
+ # return "ghz"
547
571
 
548
572
  def generate_native_ghz(self, qubit_layout: List[int], qubit_count: int, routine: str) -> QuantumCircuit:
549
573
  """
@@ -704,7 +728,7 @@ class GHZBenchmark(Benchmark):
704
728
 
705
729
  for key, value in self.configuration:
706
730
  if key == "benchmark": # Avoid saving the class object
707
- dataset.attrs[key] = value.name()
731
+ dataset.attrs[key] = value.name
708
732
  else:
709
733
  dataset.attrs[key] = value
710
734
  dataset.attrs[f"backend_name"] = self.backend.name
@@ -31,7 +31,7 @@ import xarray as xr
31
31
 
32
32
  from iqm.benchmarks import Benchmark
33
33
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
34
- from iqm.benchmarks.benchmark_definition import AnalysisResult, RunResult
34
+ from iqm.benchmarks.benchmark_definition import BenchmarkAnalysisResult, BenchmarkRunResult
35
35
  from iqm.benchmarks.logging_config import qcvv_logger
36
36
  from iqm.benchmarks.utils import (
37
37
  count_2q_layers,
@@ -222,7 +222,7 @@ def retrieve_clops_elapsed_times(job_meta: Dict[str, Dict[str, Any]]) -> Dict[st
222
222
  return overall_elapsed
223
223
 
224
224
 
225
- def clops_analysis(run: RunResult) -> AnalysisResult:
225
+ def clops_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
226
226
  """Analysis function for a CLOPS (v or h) experiment
227
227
 
228
228
  Args:
@@ -252,7 +252,8 @@ def clops_analysis(run: RunResult) -> AnalysisResult:
252
252
 
253
253
  transpiled_qc_list = []
254
254
  for _, value in dataset.attrs["transpiled_circuits"].items():
255
- transpiled_qc_list.extend(value)
255
+ for _, transpiled_circuit in value.items():
256
+ transpiled_qc_list.extend(transpiled_circuit)
256
257
 
257
258
  # CLOPS_V
258
259
  clops_v: float = num_circuits * num_updates * num_shots * depth / clops_time
@@ -310,7 +311,7 @@ def clops_analysis(run: RunResult) -> AnalysisResult:
310
311
  # Sort the final dataset
311
312
  dataset.attrs = dict(sorted(dataset.attrs.items()))
312
313
 
313
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
314
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
314
315
 
315
316
 
316
317
  class CLOPSBenchmark(Benchmark):
@@ -396,8 +397,15 @@ class CLOPSBenchmark(Benchmark):
396
397
 
397
398
  """
398
399
  qcvv_logger.info(f"Adding all circuits to the dataset")
399
- dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
400
- dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
400
+ for key, circuit in zip(
401
+ ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
402
+ ):
403
+ dictionary = {}
404
+ for outer_key, outer_value in circuit.items():
405
+ dictionary[str(outer_key)] = {
406
+ str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
407
+ }
408
+ dataset.attrs[key] = dictionary
401
409
 
402
410
  def append_parameterized_unitary(
403
411
  self,
@@ -614,8 +622,11 @@ class CLOPSBenchmark(Benchmark):
614
622
  # Sort circuits according to their final measurement mappings
615
623
  (sorted_transpiled_qc_list, _), self.time_sort_batches = sort_batches_by_final_layout(transpiled_qc_list)
616
624
 
617
- self.untranspiled_circuits.update({tuple(self.qubits): qc_list})
618
- self.transpiled_circuits.update(sorted_transpiled_qc_list)
625
+ self.untranspiled_circuits.update({str(self.qubits): {str(self.qubits): qc_list}})
626
+ self.transpiled_circuits.update(
627
+ {str(self.qubits): {str(key): value for key, value in sorted_transpiled_qc_list.items()}}
628
+ )
629
+ # self.transpiled_circuits[str(self.qubits)].update(sorted_transpiled_qc_list)
619
630
 
620
631
  return sorted_transpiled_qc_list
621
632
 
@@ -33,9 +33,15 @@ import xarray as xr
33
33
  # import iqm.diqe.executors.dynamical_decoupling.dd_high_level as dd
34
34
  # from iqm.diqe.executors.dynamical_decoupling.dynamical_decoupling_core import DDStrategy
35
35
  # from iqm.diqe.mapomatic import evaluate_costs, get_calibration_fidelities, get_circuit, matching_layouts
36
- from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
37
36
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
38
- from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
37
+ from iqm.benchmarks.benchmark_definition import (
38
+ Benchmark,
39
+ BenchmarkAnalysisResult,
40
+ BenchmarkObservation,
41
+ BenchmarkObservationIdentifier,
42
+ BenchmarkRunResult,
43
+ add_counts_to_dataset,
44
+ )
39
45
  from iqm.benchmarks.logging_config import qcvv_logger
40
46
  from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
41
47
  from iqm.benchmarks.utils import ( # execute_with_dd,
@@ -281,7 +287,7 @@ def plot_hop_threshold(
281
287
  return fig_name, fig
282
288
 
283
289
 
284
- def qv_analysis(run: RunResult) -> AnalysisResult:
290
+ def qv_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
285
291
  """Analysis function for a Quantum Volume experiment
286
292
 
287
293
  Args:
@@ -291,8 +297,8 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
291
297
  """
292
298
 
293
299
  plots = {}
294
- observations = {}
295
- dataset = run.dataset
300
+ observations: list[BenchmarkObservation] = []
301
+ dataset = run.dataset.copy(deep=True)
296
302
  backend_name = dataset.attrs["backend_name"]
297
303
  execution_timestamp = dataset.attrs["execution_timestamp"]
298
304
  num_circuits = dataset.attrs["num_circuits"]
@@ -333,17 +339,24 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
333
339
  # Compute the HO probabilities
334
340
  qv_result = compute_heavy_output_probabilities(execution_results[str(qubits)], ideal_heavy_outputs[str(qubits)])
335
341
 
336
- processed_results = {
337
- "average_heavy_output_probability": {
338
- "value": cumulative_hop(qv_result)[-1],
339
- "uncertainty": cumulative_std(qv_result)[-1],
340
- },
341
- "is_successful": {"value": str(is_successful(qv_result, num_sigmas)), "uncertainty": np.NaN},
342
- "QV_result": {
343
- "value": 2 ** len(qubits) if is_successful(qv_result, num_sigmas) else 1,
344
- "uncertainty": np.NaN,
345
- },
346
- }
342
+ observations = [
343
+ BenchmarkObservation(
344
+ name="average_heavy_output_probability",
345
+ value=cumulative_hop(qv_result)[-1],
346
+ uncertainty=cumulative_std(qv_result)[-1],
347
+ identifier=BenchmarkObservationIdentifier(qubits),
348
+ ),
349
+ BenchmarkObservation(
350
+ name="is_succesful",
351
+ value=is_successful(qv_result, num_sigmas),
352
+ identifier=BenchmarkObservationIdentifier(qubits),
353
+ ),
354
+ BenchmarkObservation(
355
+ name="QV_result",
356
+ value=2 ** len(qubits) if is_successful(qv_result) else 1,
357
+ identifier=BenchmarkObservationIdentifier(qubits),
358
+ ),
359
+ ]
347
360
 
348
361
  dataset.attrs[qubits_idx].update(
349
362
  {
@@ -354,9 +367,6 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
354
367
  }
355
368
  )
356
369
 
357
- # UPDATE OBSERVATIONS
358
- observations.update({qubits_idx: processed_results})
359
-
360
370
  fig_name, fig = plot_hop_threshold(
361
371
  qubits,
362
372
  depth[str(qubits)],
@@ -370,7 +380,7 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
370
380
  plots[fig_name] = fig
371
381
 
372
382
  if not rem:
373
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
383
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
374
384
 
375
385
  # When REM is set to True, do the post-processing with the adjusted quasi-probabilities
376
386
  mit_shots = dataset.attrs["mit_shots"]
@@ -388,18 +398,6 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
388
398
  ideal_heavy_outputs[str(qubits)],
389
399
  )
390
400
 
391
- rem_results = {
392
- "REM_average_heavy_output_probability": {
393
- "value": cumulative_hop(qv_result_rem)[-1],
394
- "uncertainty": cumulative_std(qv_result_rem)[-1],
395
- },
396
- "REM_is_successful": {"value": str(is_successful(qv_result_rem)), "uncertainty": np.NaN},
397
- "REM_QV_result": {
398
- "value": 2 ** len(qubits) if is_successful(qv_result_rem, num_sigmas) else 1,
399
- "uncertainty": np.NaN,
400
- },
401
- }
402
-
403
401
  dataset.attrs[qubits_idx].update(
404
402
  {
405
403
  "sorted_qc_list_indices": (sorted_qc_list_indices if physical_layout == "batching" else None),
@@ -410,7 +408,26 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
410
408
  )
411
409
 
412
410
  # UPDATE OBSERVATIONS
413
- observations.update({qubits_idx: rem_results})
411
+ observations.extend(
412
+ [
413
+ BenchmarkObservation(
414
+ name="REM_average_heavy_output_probability",
415
+ value=cumulative_hop(qv_result_rem)[-1],
416
+ uncertainty=cumulative_std(qv_result_rem)[-1],
417
+ identifier=BenchmarkObservationIdentifier(qubits),
418
+ ),
419
+ BenchmarkObservation(
420
+ name="REM_is_succesful",
421
+ value=is_successful(qv_result_rem, num_sigmas),
422
+ identifier=BenchmarkObservationIdentifier(qubits),
423
+ ),
424
+ BenchmarkObservation(
425
+ name="REM_QV_result",
426
+ value=2 ** len(qubits) if is_successful(qv_result_rem) else 1,
427
+ identifier=BenchmarkObservationIdentifier(qubits),
428
+ ),
429
+ ]
430
+ )
414
431
 
415
432
  fig_name_rem, fig_rem = plot_hop_threshold(
416
433
  qubits,
@@ -424,7 +441,7 @@ def qv_analysis(run: RunResult) -> AnalysisResult:
424
441
  )
425
442
  plots[fig_name_rem] = fig_rem
426
443
 
427
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
444
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
428
445
 
429
446
 
430
447
  class QuantumVolumeBenchmark(Benchmark):
@@ -509,8 +526,15 @@ class QuantumVolumeBenchmark(Benchmark):
509
526
 
510
527
  """
511
528
  qcvv_logger.info(f"Adding all circuits to the dataset")
512
- dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
513
- dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
529
+ for key, circuit in zip(
530
+ ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
531
+ ):
532
+ dictionary = {}
533
+ for outer_key, outer_value in circuit.items():
534
+ dictionary[str(outer_key)] = {
535
+ str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
536
+ }
537
+ dataset.attrs[key] = dictionary
514
538
 
515
539
  # def get_mapomatic_average_qv_scores(self) -> List[List[int]]:
516
540
  # """Estimate the average mapomatic scores for N quantum volume circuit samples
@@ -786,7 +810,9 @@ class QuantumVolumeBenchmark(Benchmark):
786
810
  "time_submit": job_dict["time_submit"],
787
811
  "time_retrieve": time_retrieve,
788
812
  "all_job_metadata": all_job_metadata,
789
- "sorted_qc_list_indices": sorted_qc_list_indices[str(qubits)],
813
+ "sorted_qc_list_indices": {
814
+ str(key): value for key, value in sorted_qc_list_indices[str(qubits)].items()
815
+ },
790
816
  "operation_counts": all_op_counts[str(qubits)],
791
817
  }
792
818
  }
@@ -23,9 +23,15 @@ import numpy as np
23
23
  from qiskit import QuantumCircuit
24
24
  import xarray as xr
25
25
 
26
- from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
27
26
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
28
- from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
27
+ from iqm.benchmarks.benchmark_definition import (
28
+ Benchmark,
29
+ BenchmarkAnalysisResult,
30
+ BenchmarkObservation,
31
+ BenchmarkObservationIdentifier,
32
+ BenchmarkRunResult,
33
+ add_counts_to_dataset,
34
+ )
29
35
  from iqm.benchmarks.logging_config import qcvv_logger
30
36
  from iqm.benchmarks.randomized_benchmarking.randomized_benchmarking_common import (
31
37
  exponential_rb,
@@ -45,7 +51,7 @@ from iqm.benchmarks.utils import retrieve_all_counts, retrieve_all_job_metadata,
45
51
  from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
46
52
 
47
53
 
48
- def clifford_rb_analysis(run: RunResult) -> AnalysisResult:
54
+ def clifford_rb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
49
55
  """Analysis function for a Clifford RB experiment
50
56
 
51
57
  Args:
@@ -53,8 +59,9 @@ def clifford_rb_analysis(run: RunResult) -> AnalysisResult:
53
59
  Returns:
54
60
  AnalysisResult corresponding to Clifford RB
55
61
  """
56
- dataset = run.dataset
57
- observations = {}
62
+ dataset = run.dataset.copy(deep=True)
63
+ observations: list[BenchmarkObservation] = []
64
+ obs_dict = {}
58
65
  plots = {}
59
66
 
60
67
  is_parallel_execution = dataset.attrs["parallel_execution"]
@@ -118,13 +125,13 @@ def clifford_rb_analysis(run: RunResult) -> AnalysisResult:
118
125
 
119
126
  processed_results = {
120
127
  "avg_gate_fidelity": {"value": fidelity.value, "uncertainty": fidelity.stderr},
121
- "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
122
- "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
123
- "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
124
128
  }
125
129
 
126
130
  dataset.attrs[qubits_idx].update(
127
131
  {
132
+ "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
133
+ "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
134
+ "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
128
135
  "fidelities": fidelities[str(qubits)],
129
136
  "avg_fidelities_nominal_values": average_fidelities,
130
137
  "avg_fidelities_stderr": stddevs_from_mean,
@@ -139,13 +146,24 @@ def clifford_rb_analysis(run: RunResult) -> AnalysisResult:
139
146
  }
140
147
  )
141
148
 
142
- observations.update({qubits_idx: processed_results})
149
+ obs_dict.update({qubits_idx: processed_results})
150
+ observations.extend(
151
+ [
152
+ BenchmarkObservation(
153
+ name=key,
154
+ identifier=BenchmarkObservationIdentifier(qubits),
155
+ value=values["value"],
156
+ uncertainty=values["uncertainty"],
157
+ )
158
+ for key, values in processed_results.items()
159
+ ]
160
+ )
143
161
 
144
162
  # Generate individual decay plots
145
- fig_name, fig = plot_rb_decay("clifford", [qubits], dataset, observations)
163
+ fig_name, fig = plot_rb_decay("clifford", [qubits], dataset, obs_dict)
146
164
  plots[fig_name] = fig
147
165
 
148
- return AnalysisResult(dataset=dataset, observations=observations, plots=plots)
166
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observations, plots=plots)
149
167
 
150
168
 
151
169
  class CliffordRandomizedBenchmarking(Benchmark):
@@ -176,7 +194,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
176
194
  self.session_timestamp = strftime("%Y%m%d-%H%M%S")
177
195
  self.execution_timestamp = ""
178
196
 
179
- def add_all_meta_to_dataset(self, dataset: xr.Dataset):
197
+ def add_all_metadata_to_dataset(self, dataset: xr.Dataset):
180
198
  """Adds all configuration metadata and circuits to the dataset variable
181
199
  Args:
182
200
  dataset (xr.Dataset): The xarray dataset
@@ -215,7 +233,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
215
233
  validate_rb_qubits(self.qubits_array, backend)
216
234
 
217
235
  dataset = xr.Dataset()
218
- self.add_all_meta_to_dataset(dataset)
236
+ self.add_all_metadata_to_dataset(dataset)
219
237
 
220
238
  clifford_1q_dict, clifford_2q_dict = import_native_gate_cliffords()
221
239
 
@@ -24,9 +24,15 @@ import numpy as np
24
24
  from qiskit import QuantumCircuit
25
25
  import xarray as xr
26
26
 
27
- from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
28
27
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
29
- from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
28
+ from iqm.benchmarks.benchmark_definition import (
29
+ Benchmark,
30
+ BenchmarkAnalysisResult,
31
+ BenchmarkObservation,
32
+ BenchmarkObservationIdentifier,
33
+ BenchmarkRunResult,
34
+ add_counts_to_dataset,
35
+ )
30
36
  from iqm.benchmarks.logging_config import qcvv_logger
31
37
  from iqm.benchmarks.randomized_benchmarking.randomized_benchmarking_common import (
32
38
  exponential_rb,
@@ -48,7 +54,7 @@ from iqm.qiskit_iqm.iqm_backend import IQMBackendBase
48
54
 
49
55
 
50
56
  # pylint: disable=too-many-statements, too-many-branches
51
- def interleaved_rb_analysis(run: RunResult) -> AnalysisResult:
57
+ def interleaved_rb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
52
58
  """Analysis function for an Interleaved RB experiment
53
59
 
54
60
  Args:
@@ -56,8 +62,9 @@ def interleaved_rb_analysis(run: RunResult) -> AnalysisResult:
56
62
  Returns:
57
63
  AnalysisResult corresponding to Interleaved RB
58
64
  """
59
- dataset = run.dataset
60
- observations: Dict[int, Any] = {}
65
+ dataset = run.dataset.copy(deep=True)
66
+ obs_dict: Dict[int, Any] = {}
67
+ observations: list[BenchmarkObservation] = []
61
68
  plots: Dict[str, Figure] = {}
62
69
 
63
70
  is_parallel_execution = dataset.attrs["parallel_execution"]
@@ -160,14 +167,26 @@ def interleaved_rb_analysis(run: RunResult) -> AnalysisResult:
160
167
 
161
168
  processed_results[rb_type] = {
162
169
  "avg_gate_fidelity": {"value": fidelity.value, "uncertainty": fidelity.stderr},
163
- "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
164
- "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
165
- "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
166
170
  }
167
171
 
172
+ observations.extend(
173
+ [
174
+ BenchmarkObservation(
175
+ name=f"{key}_{rb_type}",
176
+ identifier=BenchmarkObservationIdentifier(qubits),
177
+ value=values["value"],
178
+ uncertainty=values["uncertainty"],
179
+ )
180
+ for key, values in processed_results[rb_type].items()
181
+ ]
182
+ )
183
+
168
184
  dataset.attrs[qubits_idx].update(
169
185
  {
170
186
  rb_type: {
187
+ "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
188
+ "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
189
+ "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
171
190
  "fidelities": fidelities[str(qubits)][rb_type],
172
191
  "avg_fidelities_nominal_values": average_fidelities,
173
192
  "avg_fidelities_stderr": stddevs_from_mean,
@@ -183,7 +202,7 @@ def interleaved_rb_analysis(run: RunResult) -> AnalysisResult:
183
202
  }
184
203
  )
185
204
 
186
- observations.update({qubits_idx: processed_results})
205
+ obs_dict.update({qubits_idx: processed_results})
187
206
 
188
207
  # Generate decay plots
189
208
  if interleaved_gate_parameters is None:
@@ -196,19 +215,19 @@ def interleaved_rb_analysis(run: RunResult) -> AnalysisResult:
196
215
  "irb",
197
216
  [qubits],
198
217
  dataset,
199
- observations,
218
+ obs_dict,
200
219
  interleaved_gate=interleaved_gate_string,
201
220
  )
202
221
  plots[fig_name] = fig
203
222
 
204
223
  # Rearrange observations
205
- observations_refactored: Dict[int, Dict[str, Dict[str, float]]] = {}
206
- for k, o in observations.items():
207
- observations_refactored[k] = {}
208
- for rb_type in o.keys():
209
- observations_refactored[k].update({f"{k}_{rb_type}": v for k, v in o[rb_type].items()})
224
+ # observations_refactored: Dict[int, Dict[str, Dict[str, float]]] = {}
225
+ # for k, o in obs_dict.items():
226
+ # observations_refactored[k] = {}
227
+ # for rb_type in o.keys():
228
+ # observations_refactored[k].update({f"{k}_{rb_type}": v for k, v in o[rb_type].items()})
210
229
 
211
- return AnalysisResult(dataset=dataset, observations=observations_refactored, plots=plots)
230
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observations, plots=plots)
212
231
 
213
232
 
214
233
  class InterleavedRandomizedBenchmarking(Benchmark):
@@ -216,14 +235,14 @@ class InterleavedRandomizedBenchmarking(Benchmark):
216
235
 
217
236
  analysis_function = staticmethod(interleaved_rb_analysis)
218
237
 
219
- name: str = "clifford_rb"
238
+ name: str = "interleaved_clifford_rb"
220
239
 
221
240
  def __init__(self, backend_arg: IQMBackendBase | str, configuration: "InterleavedRBConfiguration"):
222
241
  """Construct the InterleavedRandomizedBenchmark class
223
242
 
224
243
  Args:
225
244
  backend_arg (IQMBackendBase | str): the backend to execute Clifford RB on
226
- configuration (CliffordRBConfiguration): The Clifford RB configuration
245
+ configuration (InterleavedRBConfiguration): The Clifford RB configuration
227
246
  """
228
247
  super().__init__(backend_arg, configuration)
229
248
 
@@ -15,9 +15,9 @@ from qiskit_aer import Aer
15
15
  from scipy.spatial.distance import hamming
16
16
  import xarray as xr
17
17
 
18
- from iqm.benchmarks import AnalysisResult, Benchmark, RunResult
18
+ from iqm.benchmarks import BenchmarkAnalysisResult, BenchmarkRunResult
19
19
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
20
- from iqm.benchmarks.benchmark_definition import add_counts_to_dataset
20
+ from iqm.benchmarks.benchmark_definition import Benchmark, add_counts_to_dataset
21
21
  from iqm.benchmarks.logging_config import qcvv_logger
22
22
  from iqm.benchmarks.randomized_benchmarking.randomized_benchmarking_common import (
23
23
  exponential_rb,
@@ -404,7 +404,7 @@ def list_to_numcircuit_times_numpauli_matrix(
404
404
 
405
405
 
406
406
  # pylint: disable=too-many-statements
407
- def mrb_analysis(run: RunResult) -> AnalysisResult:
407
+ def mrb_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
408
408
  """Analysis function for a MRB experiment
409
409
 
410
410
  Args:
@@ -414,7 +414,7 @@ def mrb_analysis(run: RunResult) -> AnalysisResult:
414
414
  """
415
415
  plots = {}
416
416
  observations = {}
417
- dataset = run.dataset
417
+ dataset = run.dataset.copy(deep=True)
418
418
 
419
419
  shots = dataset.attrs["shots"]
420
420
  num_circuit_samples = dataset.attrs["num_circuit_samples"]
@@ -468,7 +468,7 @@ def mrb_analysis(run: RunResult) -> AnalysisResult:
468
468
  # Execute the quantum circuits on the simulated, ideal backend
469
469
  # pylint: disable=unbalanced-tuple-unpacking
470
470
  all_noiseless_jobs, _ = submit_execute(
471
- {tuple(qubits): transpiled_circuits[str(qubits)][depth]},
471
+ {tuple(qubits): transpiled_circuits[str(qubits)][str(depth)]},
472
472
  simulator,
473
473
  shots,
474
474
  calset_id=None,
@@ -508,13 +508,13 @@ def mrb_analysis(run: RunResult) -> AnalysisResult:
508
508
 
509
509
  processed_results = {
510
510
  "avg_gate_fidelity": {"value": fidelity.value, "uncertainty": fidelity.stderr},
511
- "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
512
- "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
513
- "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
514
511
  }
515
512
 
516
513
  dataset.attrs[qubits_idx].update(
517
514
  {
515
+ "decay_rate": {"value": popt["decay_rate"].value, "uncertainty": popt["decay_rate"].stderr},
516
+ "fit_amplitude": {"value": popt["amplitude"].value, "uncertainty": popt["amplitude"].stderr},
517
+ "fit_offset": {"value": popt["offset"].value, "uncertainty": popt["offset"].stderr},
518
518
  "polarizations": polarizations,
519
519
  "avg_polarization_nominal_values": average_polarizations,
520
520
  "avg_polatization_stderr": stddevs_from_mean,
@@ -554,7 +554,7 @@ def mrb_analysis(run: RunResult) -> AnalysisResult:
554
554
  )
555
555
  plots[fig_name] = fig
556
556
 
557
- return AnalysisResult(dataset=dataset, plots=plots, observations=observations)
557
+ return BenchmarkAnalysisResult(dataset=dataset, plots=plots, observations=observations)
558
558
 
559
559
 
560
560
  class MirrorRandomizedBenchmarking(Benchmark):
@@ -564,7 +564,7 @@ class MirrorRandomizedBenchmarking(Benchmark):
564
564
 
565
565
  analysis_function = staticmethod(mrb_analysis)
566
566
 
567
- name: str = "mrb"
567
+ name: str = "mirror_rb"
568
568
 
569
569
  def __init__(self, backend_arg: IQMBackendBase | str, configuration: "MirrorRBConfiguration"):
570
570
  """Construct the MirrorRandomizedBenchmarking class
@@ -617,8 +617,15 @@ class MirrorRandomizedBenchmarking(Benchmark):
617
617
  dataset (xr.Dataset): The xarray dataset
618
618
  """
619
619
  qcvv_logger.info(f"Adding all circuits to the dataset")
620
- dataset.attrs["untranspiled_circuits"] = self.untranspiled_circuits
621
- dataset.attrs["transpiled_circuits"] = self.transpiled_circuits
620
+ for key, circuit in zip(
621
+ ["transpiled_circuits", "untranspiled_circuits"], [self.transpiled_circuits, self.untranspiled_circuits]
622
+ ):
623
+ dictionary = {}
624
+ for outer_key, outer_value in circuit.items():
625
+ dictionary[str(outer_key)] = {
626
+ str(inner_key): inner_values for inner_key, inner_values in outer_value.items()
627
+ }
628
+ dataset.attrs[key] = dictionary
622
629
 
623
630
  def submit_single_mrb_job(
624
631
  self,
@@ -733,10 +740,10 @@ class MirrorRandomizedBenchmarking(Benchmark):
733
740
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")
734
741
 
735
742
  self.untranspiled_circuits[str(qubits)] = {
736
- d: mrb_untranspiled_circuits_lists[d] for d in assigned_mrb_depths[str(qubits)]
743
+ str(d): mrb_untranspiled_circuits_lists[d] for d in assigned_mrb_depths[str(qubits)]
737
744
  }
738
745
  self.transpiled_circuits[str(qubits)] = {
739
- d: mrb_transpiled_circuits_lists[d] for d in assigned_mrb_depths[str(qubits)]
746
+ str(d): mrb_transpiled_circuits_lists[d] for d in assigned_mrb_depths[str(qubits)]
740
747
  }
741
748
 
742
749
  dataset.attrs[qubits_idx] = {"qubits": qubits}
@@ -614,13 +614,13 @@ def plot_rb_decay(
614
614
  str(q): observations[q_idx]["avg_gate_fidelity"]["uncertainty"] for q_idx, q in enumerate(qubits_array)
615
615
  }
616
616
  decay_rate[identifier] = {
617
- str(q): observations[q_idx]["decay_rate"]["value"] for q_idx, q in enumerate(qubits_array)
617
+ str(q): dataset.attrs[q_idx]["decay_rate"]["value"] for q_idx, q in enumerate(qubits_array)
618
618
  }
619
619
  offset[identifier] = {
620
- str(q): observations[q_idx]["fit_offset"]["value"] for q_idx, q in enumerate(qubits_array)
620
+ str(q): dataset.attrs[q_idx]["fit_offset"]["value"] for q_idx, q in enumerate(qubits_array)
621
621
  }
622
622
  amplitude[identifier] = {
623
- str(q): observations[q_idx]["fit_amplitude"]["value"] for q_idx, q in enumerate(qubits_array)
623
+ str(q): dataset.attrs[q_idx]["fit_amplitude"]["value"] for q_idx, q in enumerate(qubits_array)
624
624
  }
625
625
  else:
626
626
  rb_type_keys = list(observations[0].keys())
@@ -648,13 +648,14 @@ def plot_rb_decay(
648
648
  for q_idx, q in enumerate(qubits_array)
649
649
  }
650
650
  decay_rate[rb_type] = {
651
- str(q): observations[q_idx][rb_type]["decay_rate"]["value"] for q_idx, q in enumerate(qubits_array)
651
+ str(q): dataset.attrs[q_idx][rb_type]["decay_rate"]["value"] for q_idx, q in enumerate(qubits_array)
652
652
  }
653
+ print(dataset.attrs)
653
654
  offset[rb_type] = {
654
- str(q): observations[q_idx][rb_type]["fit_offset"]["value"] for q_idx, q in enumerate(qubits_array)
655
+ str(q): dataset.attrs[q_idx][rb_type]["fit_offset"]["value"] for q_idx, q in enumerate(qubits_array)
655
656
  }
656
657
  amplitude[rb_type] = {
657
- str(q): observations[q_idx][rb_type]["fit_amplitude"]["value"] for q_idx, q in enumerate(qubits_array)
658
+ str(q): dataset.attrs[q_idx][rb_type]["fit_amplitude"]["value"] for q_idx, q in enumerate(qubits_array)
658
659
  }
659
660
 
660
661
  for index_irb, key in enumerate(rb_type_keys):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: iqm-benchmarks
3
- Version: 1.3
3
+ Version: 1.5
4
4
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
5
5
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
6
6
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -15,14 +15,14 @@ Requires-Dist: lmfit==1.3.1
15
15
  Requires-Dist: matplotlib==3.9.0
16
16
  Requires-Dist: more-itertools==10.1.0
17
17
  Requires-Dist: mthree==2.6.3
18
- Requires-Dist: networkx==2.8.8
19
- Requires-Dist: numpy>=1.24.2
18
+ Requires-Dist: networkx==3.3
19
+ Requires-Dist: numpy>=1.25.2
20
20
  Requires-Dist: qiskit==0.45.3
21
21
  Requires-Dist: qiskit-aer==0.13.3
22
22
  Requires-Dist: qiskit-iqm==15.1
23
23
  Requires-Dist: scikit-optimize==0.10.2
24
24
  Requires-Dist: tabulate==0.9.0
25
- Requires-Dist: uncertainties==3.2.1
25
+ Requires-Dist: uncertainties==3.2.2
26
26
  Requires-Dist: pycurl==7.45.3
27
27
  Requires-Dist: xarray==2024.6.0
28
28
  Requires-Dist: types-pycurl
@@ -1,6 +1,6 @@
1
- iqm/benchmarks/__init__.py,sha256=OPL9DS7yU21ZYrYGJ_VGpSeWH-CpW_iq5w0mSPx7bAE,1055
1
+ iqm/benchmarks/__init__.py,sha256=7EOYlsJriQHKAlb3tHpPY4bh4wLbsssZg4NV0XW0WBU,2128
2
2
  iqm/benchmarks/benchmark.py,sha256=SGhBcSxLPUu-cVXAjG4Db2TRobFCRBYoE1NtTDK1lJg,4432
3
- iqm/benchmarks/benchmark_definition.py,sha256=1VxXaJBUHuiLh1VgSuG7xUfZ8ZxTMxrN7IkzQdnvBzw,9232
3
+ iqm/benchmarks/benchmark_definition.py,sha256=zdsZcZm8U_C9PVhgktKPF0AX8neu-6VdWjtcCua2XEs,10310
4
4
  iqm/benchmarks/benchmark_experiment.py,sha256=0BFNn04jyD1Yj-pIKnuZjCD00v3pU5EnwkRzJUBc0n4,6540
5
5
  iqm/benchmarks/logging_config.py,sha256=U7olP5Kr75AcLJqNODf9VBhJLVqIvA4AYR6J39D5rww,1052
6
6
  iqm/benchmarks/readout_mitigation.py,sha256=cx8K2EAjhYfVKGSRlpoh9xBFQZZhyL63wPMtzNL3hAg,12329
@@ -8,23 +8,23 @@ iqm/benchmarks/utils.py,sha256=fNuGkN_GI73nZD6xDN_r97Pr6I8uIDhPXZqdQkQvDpM,21291
8
8
  iqm/benchmarks/compressive_gst/__init__.py,sha256=4F_5YxExn4ysGQaEs-WNfm0yiFBxkKtGAYBZFWnRsSk,915
9
9
  iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=iLBG6HBPfW3y4EYs6DItuBjqBbMQIoCFsJlUSgH_w8w,46288
10
10
  iqm/benchmarks/entanglement/__init__.py,sha256=9T7prOwqMmFWdb4t6ETAHZXKK5o6FvU2DvVb6WhNi-U,682
11
- iqm/benchmarks/entanglement/ghz.py,sha256=T7HCYmI9pCZn5JuONZMc_J-zFxxf2ioIQ-iMHBOQ8u4,35242
11
+ iqm/benchmarks/entanglement/ghz.py,sha256=BFadLxux6vGWz1IlKOJkQA_qKAOa6HR63vhwrREZelI,36058
12
12
  iqm/benchmarks/optimization/__init__.py,sha256=_ajW_OibYLCtzU5AUv5c2zuuVYn8ZNeZUcUUSIGt51M,747
13
13
  iqm/benchmarks/optimization/qscore.py,sha256=_s5_w5QTlaeDcHX1BmAevVctHOTVobee1eGkFx7DTEs,27942
14
14
  iqm/benchmarks/quantum_volume/__init__.py,sha256=i-Q4SpDWELBw7frXnxm1j4wJRcxbIyrS5uEK_v06YHo,951
15
- iqm/benchmarks/quantum_volume/clops.py,sha256=8HxCNaI5_ZUBgRqE6hQkcR7Exc-E9INv4GFvv-QF--U,30837
16
- iqm/benchmarks/quantum_volume/quantum_volume.py,sha256=An1NUsS5ekuDn4fk7MrS1B-YRoG4cboPwGQd4YMuQ5M,35839
15
+ iqm/benchmarks/quantum_volume/clops.py,sha256=GSuDnAB-XzFP7DLVsYC9zjvf7NAQqQHIgX0TAxiFvOs,31451
16
+ iqm/benchmarks/quantum_volume/quantum_volume.py,sha256=jnVh1A-WC9DjpE8nvVIa87Ov9iihB17NOh8aiwjlu0I,36883
17
17
  iqm/benchmarks/randomized_benchmarking/__init__.py,sha256=IkKo-7zUChxZZd3my_csQCJfJfZNsV3-JTvdG8uqys4,734
18
18
  iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl,sha256=SVcye1bsjxf1hvC1ku2vHOIZQRG5hy1loAOugpVhUE8,42517
19
19
  iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl,sha256=BLDpXdyDGkFUxCMHyUMDBnNclCQTsty-kIXwiuY31eA,82994083
20
20
  iqm/benchmarks/randomized_benchmarking/multi_lmfit.py,sha256=Se1ygR4mXn_2_P82Ch31KBnCmY-g_A9NKzE9Ir8nEvw,3247
21
- iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py,sha256=RZX1G9PBCwqwHk_ylqPHLtWk3GMN5Tc2OchnaEv9W2Y,37824
21
+ iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py,sha256=hKe3qz2dQtcGEOODhcEP1OBH3e68ryDYFiYJ0PDGEJs,37863
22
22
  iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py,sha256=bTDA156LAl7OLGcMec--1nzDrV1XpPRVq3CquTmucgE,677
23
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py,sha256=0zaFjzz95aDsZ7dxV5-rXd8lGOSk9KuCXZBtk988I8M,16911
23
+ iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py,sha256=uo6cPhoQQ9qAGeZFZG23jxaR88_qKdiqtlBrG_mAGWg,17470
24
24
  iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py,sha256=sq6MgN_hwlpkOj10vyCU4e6eKSX-oLcF2L9na6W2Gt4,681
25
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=kZHS3_FnwgVAt4_UmOmwl5DRDO24dKNV6epAGxGRM-U,25563
25
+ iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=hTjjitBp0CEkOnvZj_byJ2CzPHiwXaDhxIO8O8dcAj4,26205
26
26
  iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py,sha256=ZekEqI_89nXzGO1vjM-b5Uwwicy59M4fYHXfA-f0MIg,674
27
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=yDYJePSG6kcW0aa3Xw5GHris5L7Sry3JWemvEW5iH2o,34060
27
+ iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=v1UOCVK4_fcOB2VNjMqQB8tp4JwytjdY7RM_lXAtFy0,34463
28
28
  mGST/LICENSE,sha256=TtHNq55cUcbglb7uhVudeBLUh_qPdUoAEvU0BBwFz-k,1098
29
29
  mGST/README.md,sha256=v_5kw253csHF4-RfE-44KqFmBXIsSMRmOtN0AUPrRxE,5050
30
30
  mGST/additional_fns.py,sha256=_SEJ10FRNM7_CroysT8hCLZTfpm6ZhEIDCY5zPTnhjo,31390
@@ -35,8 +35,8 @@ mGST/optimization.py,sha256=YHwkzIkYvsZOPjclR-BCQWh24jeqjuXp0BB0WX5Lwow,10559
35
35
  mGST/qiskit_interface.py,sha256=2XuJ4WFViLsHCTpEZncwsLbRr-cELEYhegTpRPzCcuI,10080
36
36
  mGST/reporting/figure_gen.py,sha256=6Xd8vwfy09hLY1YbJY6TRevuMsQSU4MsWqemly3ZO0I,12970
37
37
  mGST/reporting/reporting.py,sha256=-XBy3OCJIMOsA8cApwKjhVKBwnjSAoxm-voHNbRWytM,25803
38
- iqm_benchmarks-1.3.dist-info/LICENSE,sha256=2Ncb40-hqkTil78RPv3-YiJfKaJ8te9USJgliKqIdSY,11558
39
- iqm_benchmarks-1.3.dist-info/METADATA,sha256=Xl9uaemgivU4KTvjrt9A0nF97TFNyKfHputsfiKl4KI,8378
40
- iqm_benchmarks-1.3.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
41
- iqm_benchmarks-1.3.dist-info/top_level.txt,sha256=3G23Z-1LGf-IOzTCUl6QwWqiQ3USz25Zt90Ihq192to,9
42
- iqm_benchmarks-1.3.dist-info/RECORD,,
38
+ iqm_benchmarks-1.5.dist-info/LICENSE,sha256=2Ncb40-hqkTil78RPv3-YiJfKaJ8te9USJgliKqIdSY,11558
39
+ iqm_benchmarks-1.5.dist-info/METADATA,sha256=y5aehxY2SokKgSxMNB4COxxCoJKhJh-ogMeO0ft6cmM,8376
40
+ iqm_benchmarks-1.5.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
41
+ iqm_benchmarks-1.5.dist-info/top_level.txt,sha256=3G23Z-1LGf-IOzTCUl6QwWqiQ3USz25Zt90Ihq192to,9
42
+ iqm_benchmarks-1.5.dist-info/RECORD,,