iqm-benchmarks 2.48__tar.gz → 2.49__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112) hide show
  1. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/CHANGELOG.rst +4 -0
  2. {iqm_benchmarks-2.48/src/iqm_benchmarks.egg-info → iqm_benchmarks-2.49}/PKG-INFO +1 -1
  3. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/coherence/coherence.py +12 -5
  4. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/compressive_gst/compressive_gst.py +14 -5
  5. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/entanglement/ghz.py +8 -2
  6. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/entanglement/graph_states.py +6 -3
  7. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/optimization/qscore.py +11 -4
  8. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/quantum_volume/quantum_volume.py +8 -3
  9. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +10 -1
  10. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py +10 -3
  11. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +11 -1
  12. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +8 -1
  13. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49/src/iqm_benchmarks.egg-info}/PKG-INFO +1 -1
  14. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/.github/workflows/main.yml +0 -0
  15. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/.github/workflows/publish.yml +0 -0
  16. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/.github/workflows/tag_and_release.yml +0 -0
  17. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/.gitignore +0 -0
  18. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/LICENSE +0 -0
  19. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/MANIFEST.in +0 -0
  20. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/README.md +0 -0
  21. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/benchmark_runner.py +0 -0
  22. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docbuild +0 -0
  23. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/API.rst +0 -0
  24. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/Makefile +0 -0
  25. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/_static/images/favicon.ico +0 -0
  26. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/_static/images/logo.png +0 -0
  27. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/_templates/autosummary-class-template.rst +0 -0
  28. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/_templates/autosummary-module-template.rst +0 -0
  29. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/changelog.rst +0 -0
  30. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/conf.py +0 -0
  31. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/development/development.rst +0 -0
  32. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/development/generate_2qubit_cliffords.ipynb +0 -0
  33. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/development/how_to_make_your_own_benchmark.ipynb +0 -0
  34. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/devices/devices.rst +0 -0
  35. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/devices/spark.ipynb +0 -0
  36. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/devices/star.ipynb +0 -0
  37. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_clifford_rb.ipynb +0 -0
  38. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_clops.ipynb +0 -0
  39. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_coherence.ipynb +0 -0
  40. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_eplg.ipynb +0 -0
  41. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_experiment_all.ipynb +0 -0
  42. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_ghz.ipynb +0 -0
  43. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_graphstate.ipynb +0 -0
  44. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_gst.ipynb +0 -0
  45. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_interleaved_rb.ipynb +0 -0
  46. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_mirror_rb.ipynb +0 -0
  47. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_qscore.ipynb +0 -0
  48. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/example_quantum_volume.ipynb +0 -0
  49. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/examples/examples.rst +0 -0
  50. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/index.rst +0 -0
  51. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/license.rst +0 -0
  52. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/docs/readme.md +0 -0
  53. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/format +0 -0
  54. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/pyproject.toml +0 -0
  55. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/requirements.txt +0 -0
  56. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/scheduled_experiments/adonis/__init__.py +0 -0
  57. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/scheduled_experiments/adonis/weekly.py +0 -0
  58. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/setup.cfg +0 -0
  59. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/__init__.py +0 -0
  60. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/benchmark.py +0 -0
  61. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/benchmark_definition.py +0 -0
  62. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/circuit_containers.py +0 -0
  63. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/coherence/__init__.py +0 -0
  64. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/compressive_gst/__init__.py +0 -0
  65. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/compressive_gst/gst_analysis.py +0 -0
  66. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/entanglement/__init__.py +0 -0
  67. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/logging_config.py +0 -0
  68. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/optimization/__init__.py +0 -0
  69. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/quantum_volume/__init__.py +0 -0
  70. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/quantum_volume/clops.py +0 -0
  71. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/__init__.py +0 -0
  72. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
  73. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
  74. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +0 -0
  75. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/direct_rb/__init__.py +0 -0
  76. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/eplg/__init__.py +0 -0
  77. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/eplg/eplg.py +0 -0
  78. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +0 -0
  79. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +0 -0
  80. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +0 -0
  81. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +0 -0
  82. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/readout_mitigation.py +0 -0
  83. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/utils.py +0 -0
  84. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/utils_plots.py +0 -0
  85. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm/benchmarks/utils_shadows.py +0 -0
  86. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm_benchmarks.egg-info/SOURCES.txt +0 -0
  87. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm_benchmarks.egg-info/dependency_links.txt +0 -0
  88. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm_benchmarks.egg-info/requires.txt +0 -0
  89. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/iqm_benchmarks.egg-info/top_level.txt +0 -0
  90. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/LICENSE +0 -0
  91. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/README.md +0 -0
  92. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/additional_fns.py +0 -0
  93. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/algorithm.py +0 -0
  94. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/compatibility.py +0 -0
  95. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/low_level_jit.py +0 -0
  96. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/optimization.py +0 -0
  97. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/qiskit_interface.py +0 -0
  98. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/reporting/figure_gen.py +0 -0
  99. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/src/mGST/reporting/reporting.py +0 -0
  100. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tag-from-pipeline.sh +0 -0
  101. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/test +0 -0
  102. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_coherence.py +0 -0
  103. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_ghz.py +0 -0
  104. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_graph_states.py +0 -0
  105. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_gst.py +0 -0
  106. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_qscore.py +0 -0
  107. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_qv.py +0 -0
  108. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/test_rb.py +0 -0
  109. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/unit/test_backend_transpilation.py +0 -0
  110. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/unit/test_benchmark_circuit.py +0 -0
  111. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/tests/unit/test_submit_execute.py +0 -0
  112. {iqm_benchmarks-2.48 → iqm_benchmarks-2.49}/update-requirements.py +0 -0
@@ -2,6 +2,10 @@
2
2
  Changelog
3
3
  =========
4
4
 
5
+ Version 2.49
6
+ ============
7
+ * Added logging of execution time to all benchmarks.
8
+
5
9
  Version 2.48
6
10
  ============
7
11
  * Updated iqm-client and supported python versions.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: iqm-benchmarks
3
- Version: 2.48
3
+ Version: 2.49
4
4
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
5
5
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
6
6
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -422,6 +422,8 @@ class CoherenceBenchmark(Benchmark):
422
422
  ) -> xr.Dataset:
423
423
  """Executes the benchmark."""
424
424
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
425
+ total_submit: float = 0
426
+ total_retrieve: float = 0
425
427
 
426
428
  dataset = xr.Dataset()
427
429
  self.add_all_meta_to_dataset(dataset)
@@ -457,7 +459,7 @@ class CoherenceBenchmark(Benchmark):
457
459
  qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
458
460
  qcvv_logger.setLevel(logging.WARNING)
459
461
 
460
- jobs, _ = submit_execute(
462
+ jobs, time_submit = submit_execute(
461
463
  sorted_transpiled_qc_list,
462
464
  self.backend,
463
465
  self.shots,
@@ -466,9 +468,11 @@ class CoherenceBenchmark(Benchmark):
466
468
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
467
469
  circuit_compilation_options=self.circuit_compilation_options,
468
470
  )
471
+ total_submit += time_submit
469
472
 
470
473
  qcvv_logger.setLevel(logging.INFO)
471
- execution_results = retrieve_all_counts(jobs)[0]
474
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
475
+ total_retrieve += time_retrieve
472
476
  identifier = BenchmarkObservationIdentifier(qubit_set)
473
477
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
474
478
  dataset.attrs.update(
@@ -494,8 +498,7 @@ class CoherenceBenchmark(Benchmark):
494
498
  # Execute on the backend
495
499
  if self.configuration.use_dd is True:
496
500
  raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")
497
-
498
- jobs, _ = submit_execute(
501
+ jobs, time_submit = submit_execute(
499
502
  sorted_transpiled_qc_list,
500
503
  self.backend,
501
504
  self.shots,
@@ -504,8 +507,10 @@ class CoherenceBenchmark(Benchmark):
504
507
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
505
508
  circuit_compilation_options=self.circuit_compilation_options,
506
509
  )
510
+ total_submit += time_submit
507
511
  qcvv_logger.setLevel(logging.INFO)
508
- execution_results = retrieve_all_counts(jobs)[0]
512
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
513
+ total_retrieve += time_retrieve
509
514
  identifier = BenchmarkObservationIdentifier(group)
510
515
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
511
516
 
@@ -524,6 +529,8 @@ class CoherenceBenchmark(Benchmark):
524
529
  self.transpiled_circuits.circuit_groups.append(
525
530
  CircuitGroup(name=self.coherence_exp, circuits=transpiled_qc_list)
526
531
  )
532
+ dataset.attrs["total_submit_time"] = total_submit
533
+ dataset.attrs["total_retrieve_time"] = total_retrieve
527
534
 
528
535
  return dataset
529
536
 
@@ -32,6 +32,7 @@ from typing import Any, Dict, List, Tuple, Type, Union
32
32
  import numpy as np
33
33
  from qiskit.circuit.library import CZGate, RGate
34
34
  import xarray as xr
35
+ from time import strftime
35
36
 
36
37
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
37
38
  from iqm.benchmarks.benchmark_definition import Benchmark, BenchmarkObservationIdentifier, add_counts_to_dataset
@@ -242,7 +243,9 @@ class CompressiveGST(Benchmark):
242
243
  """
243
244
  The main GST execution routine
244
245
  """
245
-
246
+ self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
247
+ total_submit: float = 0
248
+ total_retrieve: float = 0
246
249
  dataset = xr.Dataset()
247
250
  qcvv_logger.info(f"Generating {self.configuration.num_circuits} random GST circuits")
248
251
 
@@ -255,7 +258,7 @@ class CompressiveGST(Benchmark):
255
258
  transpiled_circuit_dict = {
256
259
  tuple(range(self.backend.num_qubits)): transpiled_circuits[str(self.qubit_layouts[0])].circuits
257
260
  }
258
- all_jobs_parallel, _ = submit_execute(
261
+ all_jobs_parallel, time_submit = submit_execute(
259
262
  transpiled_circuit_dict,
260
263
  backend,
261
264
  self.configuration.shots,
@@ -264,15 +267,17 @@ class CompressiveGST(Benchmark):
264
267
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
265
268
  circuit_compilation_options=self.circuit_compilation_options,
266
269
  )
270
+ total_submit += time_submit
267
271
  # Retrieve
268
272
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
269
- counts, _ = retrieve_all_counts(all_jobs_parallel)
273
+ counts, time_retrieve = retrieve_all_counts(all_jobs_parallel)
274
+ total_retrieve += time_retrieve
270
275
  dataset, _ = add_counts_to_dataset(counts, f"parallel_results", dataset)
271
276
  else:
272
277
  all_jobs: Dict = {}
273
278
  for qubit_layout in self.qubit_layouts:
274
279
  transpiled_circuit_dict = {tuple(qubit_layout): transpiled_circuits[str(qubit_layout)].circuits}
275
- all_jobs[str(qubit_layout)], _ = submit_execute(
280
+ all_jobs[str(qubit_layout)], time_submit = submit_execute(
276
281
  transpiled_circuit_dict,
277
282
  backend,
278
283
  self.configuration.shots,
@@ -280,14 +285,18 @@ class CompressiveGST(Benchmark):
280
285
  max_gates_per_batch=self.configuration.max_gates_per_batch,
281
286
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
282
287
  )
288
+ total_submit += time_submit
283
289
  # Retrieve all
284
290
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
285
291
  for qubit_layout in self.qubit_layouts:
286
- counts, _ = retrieve_all_counts(all_jobs[str(qubit_layout)])
292
+ counts, time_retrieve = retrieve_all_counts(all_jobs[str(qubit_layout)])
293
+ total_retrieve += time_retrieve
287
294
  dataset, _ = add_counts_to_dataset(counts, str(qubit_layout), dataset)
288
295
 
289
296
  self.circuits.benchmark_circuits = [transpiled_circuits, untranspiled_circuits]
290
297
  self.add_configuration_to_dataset(dataset)
298
+ dataset.attrs["total_submit_time"] = total_submit
299
+ dataset.attrs["total_retrieve_time"] = total_retrieve
291
300
  qcvv_logger.info(f"Run completed")
292
301
  return dataset
293
302
 
@@ -828,6 +828,8 @@ class GHZBenchmark(Benchmark):
828
828
  Executes the benchmark.
829
829
  """
830
830
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
831
+ total_submit: float = 0
832
+ total_retrieve: float = 0
831
833
  aux_custom_qubits_array = cast(List[List[int]], self.custom_qubits_array).copy()
832
834
  dataset = xr.Dataset()
833
835
 
@@ -844,7 +846,7 @@ class GHZBenchmark(Benchmark):
844
846
  qubit_count = len(qubit_layout)
845
847
  circuit_group: CircuitGroup = self.generate_readout_circuit(qubit_layout, qubit_count)
846
848
  transpiled_circuit_dict = {tuple(qubit_layout): circuit_group.circuits}
847
- all_jobs[idx], _ = submit_execute(
849
+ all_jobs[idx], time_submit = submit_execute(
848
850
  transpiled_circuit_dict,
849
851
  backend,
850
852
  self.shots,
@@ -853,6 +855,7 @@ class GHZBenchmark(Benchmark):
853
855
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
854
856
  circuit_compilation_options=self.circuit_compilation_options,
855
857
  )
858
+ total_submit += time_submit
856
859
 
857
860
  # Retrieve all
858
861
  for qubit_layout in aux_custom_qubits_array:
@@ -860,7 +863,8 @@ class GHZBenchmark(Benchmark):
860
863
  Id = BenchmarkObservationIdentifier(qubit_layout)
861
864
  idx = Id.string_identifier
862
865
  qubit_count = len(qubit_layout)
863
- counts, _ = retrieve_all_counts(all_jobs[idx])
866
+ counts, time_retrieve = retrieve_all_counts(all_jobs[idx])
867
+ total_retrieve += time_retrieve
864
868
  dataset, _ = add_counts_to_dataset(counts, idx, dataset)
865
869
  if self.rem:
866
870
  qcvv_logger.info(f"Applying readout error mitigation")
@@ -870,6 +874,8 @@ class GHZBenchmark(Benchmark):
870
874
  dataset, _ = add_counts_to_dataset(rem_results_dist, f"{idx}_rem", dataset)
871
875
 
872
876
  self.add_configuration_to_dataset(dataset)
877
+ dataset.attrs["total_submit_time"] = total_submit
878
+ dataset.attrs["total_retrieve_time"] = total_retrieve
873
879
  return dataset
874
880
 
875
881
 
@@ -1151,6 +1151,8 @@ class GraphStateBenchmark(Benchmark):
1151
1151
  Executes the benchmark.
1152
1152
  """
1153
1153
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
1154
+ total_submit: float = 0
1155
+ total_retrieve: float = 0
1154
1156
 
1155
1157
  dataset = xr.Dataset()
1156
1158
  self.add_all_meta_to_dataset(dataset)
@@ -1280,7 +1282,7 @@ class GraphStateBenchmark(Benchmark):
1280
1282
  graph_jobs, time_submit = submit_execute(
1281
1283
  sorted_transpiled_qc_list, backend, self.shots, self.calset_id, self.max_gates_per_batch
1282
1284
  )
1283
-
1285
+ total_submit += time_submit
1284
1286
  all_graph_submit_results.append(
1285
1287
  {
1286
1288
  "unprojected_qubits": unprojected_qubits[idx],
@@ -1295,7 +1297,7 @@ class GraphStateBenchmark(Benchmark):
1295
1297
  unprojected_qubits = job_dict["unprojected_qubits"]
1296
1298
  # Retrieve counts
1297
1299
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier=str(unprojected_qubits))
1298
-
1300
+ total_retrieve += time_retrieve
1299
1301
  # Retrieve all job meta data
1300
1302
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
1301
1303
 
@@ -1320,7 +1322,8 @@ class GraphStateBenchmark(Benchmark):
1320
1322
  # if self.rem: TODO: add REM functionality
1321
1323
 
1322
1324
  qcvv_logger.info(f"Graph State benchmark experiment execution concluded !")
1323
-
1325
+ dataset.attrs["total_submit_time"] = total_submit
1326
+ dataset.attrs["total_retrieve_time"] = total_retrieve
1324
1327
  return dataset
1325
1328
 
1326
1329
 
@@ -741,6 +741,8 @@ class QScoreBenchmark(Benchmark):
741
741
  ) -> xr.Dataset:
742
742
  """Executes the benchmark."""
743
743
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
744
+ total_submit: float = 0
745
+ total_retrieve: float = 0
744
746
 
745
747
  dataset = xr.Dataset()
746
748
  self.add_all_meta_to_dataset(dataset)
@@ -874,7 +876,7 @@ class QScoreBenchmark(Benchmark):
874
876
 
875
877
  sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc}
876
878
  # Execute on the backend
877
- jobs, _ = submit_execute(
879
+ jobs, time_submit = submit_execute(
878
880
  sorted_transpiled_qc_list,
879
881
  self.backend,
880
882
  self.shots,
@@ -883,13 +885,15 @@ class QScoreBenchmark(Benchmark):
883
885
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
884
886
  circuit_compilation_options=self.circuit_compilation_options,
885
887
  )
888
+ total_submit += time_submit
886
889
  qc_transpiled_list.append(transpiled_qc)
887
890
  qcvv_logger.setLevel(logging.INFO)
888
891
  instance_with_edges = set(range(self.num_instances)) - set(no_edge_instances)
889
892
  num_instances_with_edges = len(instance_with_edges)
890
893
  if self.REM:
894
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
891
895
  rem_counts = apply_readout_error_mitigation(
892
- backend, transpiled_qc, retrieve_all_counts(jobs)[0], self.mit_shots
896
+ backend, transpiled_qc, counts_retrieved, self.mit_shots
893
897
  )
894
898
  execution_results.extend(
895
899
  rem_counts[0][instance].nearest_probability_distribution()
@@ -897,8 +901,9 @@ class QScoreBenchmark(Benchmark):
897
901
  )
898
902
  # execution_results.append(rem_distribution)
899
903
  else:
900
- execution_results.extend(retrieve_all_counts(jobs)[0])
901
-
904
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
905
+ execution_results.extend(counts_retrieved)
906
+ total_retrieve += time_retrieve
902
907
  dataset.attrs.update(
903
908
  {
904
909
  num_nodes: {
@@ -921,6 +926,8 @@ class QScoreBenchmark(Benchmark):
921
926
  )
922
927
 
923
928
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
929
+ dataset.attrs["total_submit_time"] = total_submit
930
+ dataset.attrs["total_retrieve_time"] = total_retrieve
924
931
 
925
932
  return dataset
926
933
 
@@ -17,7 +17,7 @@ Quantum Volume benchmark
17
17
  """
18
18
 
19
19
  from copy import deepcopy
20
- from time import strftime
20
+ from time import strftime, time
21
21
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type
22
22
 
23
23
  from matplotlib.figure import Figure
@@ -716,6 +716,8 @@ class QuantumVolumeBenchmark(Benchmark):
716
716
  """Executes the benchmark."""
717
717
 
718
718
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
719
+ total_submit: float = 0
720
+ total_retrieve: float = 0
719
721
 
720
722
  dataset = xr.Dataset()
721
723
  self.add_all_meta_to_dataset(dataset)
@@ -780,7 +782,9 @@ class QuantumVolumeBenchmark(Benchmark):
780
782
  all_op_counts[str(qubits)] = count_native_gates(backend, transpiled_qc_list)
781
783
 
782
784
  # Submit
785
+ t_start = time()
783
786
  all_qv_jobs.append(self.submit_single_qv_job(backend, qubits, sorted_transpiled_qc_list))
787
+ total_submit += time() - t_start
784
788
  qcvv_logger.info(f"Job for layout {qubits} submitted successfully!")
785
789
 
786
790
  # Retrieve counts of jobs for all qubit layouts
@@ -791,7 +795,7 @@ class QuantumVolumeBenchmark(Benchmark):
791
795
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], str(qubits))
792
796
  # Retrieve all job meta data
793
797
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
794
-
798
+ total_retrieve += time_retrieve
795
799
  # Export all to dataset
796
800
  dataset.attrs.update(
797
801
  {
@@ -829,7 +833,8 @@ class QuantumVolumeBenchmark(Benchmark):
829
833
  self.mit_shots,
830
834
  )
831
835
  dataset.attrs.update({"REM_quasidistributions": rem_quasidistros})
832
-
836
+ dataset.attrs["total_submit_time"] = total_submit
837
+ dataset.attrs["total_retrieve_time"] = total_retrieve
833
838
  qcvv_logger.info(f"QV experiment execution concluded !")
834
839
  return dataset
835
840
 
@@ -16,7 +16,7 @@
16
16
  'Standard' Clifford Randomized Benchmarking.
17
17
  """
18
18
 
19
- from time import strftime
19
+ from time import strftime, time
20
20
  from typing import Any, Dict, List, Sequence, Type
21
21
 
22
22
  import numpy as np
@@ -252,6 +252,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
252
252
  # Submit jobs for all qubit layouts
253
253
  all_rb_jobs: List[Dict[str, Any]] = []
254
254
  time_circuit_generation: Dict[str, float] = {}
255
+ total_submit: float = 0
256
+ total_retrieve: float = 0
255
257
 
256
258
  # Initialize the variable to contain the circuits for each layout
257
259
  self.untranspiled_circuits = BenchmarkCircuit("untranspiled_circuits")
@@ -290,6 +292,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
290
292
  # Submit all
291
293
  flat_qubits_array = [x for y in self.qubits_array for x in y]
292
294
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_transpiled_rb_circuits[seq_length]}
295
+ t_start = time()
293
296
  all_rb_jobs.append(
294
297
  submit_parallel_rb_job(
295
298
  backend,
@@ -302,6 +305,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
302
305
  self.configuration.max_circuits_per_batch,
303
306
  )
304
307
  )
308
+ total_submit += time() - t_start
305
309
  qcvv_logger.info(f"Job for sequence length {seq_length} submitted successfully!")
306
310
 
307
311
  self.untranspiled_circuits.circuit_groups.append(
@@ -351,6 +355,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
351
355
  )
352
356
 
353
357
  # Submit
358
+ t_start = time()
354
359
  all_rb_jobs.extend(
355
360
  submit_sequential_rb_jobs(
356
361
  qubits,
@@ -363,6 +368,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
363
368
  circuit_compilation_options=self.circuit_compilation_options,
364
369
  )
365
370
  )
371
+ total_submit += time() - t_start
366
372
  qcvv_logger.info(
367
373
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
368
374
  )
@@ -386,6 +392,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
386
392
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
387
393
  # Retrieve all job meta data
388
394
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
395
+ total_retrieve += time_retrieve
389
396
  # Export all to dataset
390
397
  dataset.attrs[qubit_idx[str(qubits)]].update(
391
398
  {
@@ -401,6 +408,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
401
408
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
402
409
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)
403
410
 
411
+ dataset.attrs["total_submit_time"] = total_submit
412
+ dataset.attrs["total_retrieve_time"] = total_retrieve
404
413
  qcvv_logger.info(f"RB experiment concluded !")
405
414
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
406
415
 
@@ -3,7 +3,7 @@ Direct Randomized Benchmarking.
3
3
  """
4
4
 
5
5
  import random
6
- from time import strftime
6
+ from time import strftime, time
7
7
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type, cast
8
8
 
9
9
  import numpy as np
@@ -751,6 +751,8 @@ class DirectRandomizedBenchmarking(Benchmark):
751
751
  xr.Dataset: Dataset containing benchmark results and metadata
752
752
  """
753
753
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
754
+ total_submit: float = 0
755
+ total_retrieve: float = 0
754
756
 
755
757
  dataset = xr.Dataset()
756
758
 
@@ -816,6 +818,7 @@ class DirectRandomizedBenchmarking(Benchmark):
816
818
  # Submit all
817
819
  flat_qubits_array = [x for y in loop_qubits_sequence for x in y]
818
820
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_drb_circuits[depth]["transpiled"]}
821
+ t_start = time()
819
822
  all_drb_jobs.append(
820
823
  submit_parallel_rb_job(
821
824
  backend,
@@ -828,6 +831,7 @@ class DirectRandomizedBenchmarking(Benchmark):
828
831
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
829
832
  )
830
833
  )
834
+ total_submit += time() - t_start
831
835
  qcvv_logger.info(f"Job for depth {depth} submitted successfully!")
832
836
 
833
837
  self.untranspiled_circuits.circuit_groups.append(
@@ -884,6 +888,7 @@ class DirectRandomizedBenchmarking(Benchmark):
884
888
  sorted_transpiled_qc_list = {
885
889
  cast(Tuple[int, ...], tuple(qubits)): drb_transpiled_circuits_lists[depth]
886
890
  }
891
+ t_start = time()
887
892
  all_drb_jobs.append(
888
893
  self.submit_single_drb_job(
889
894
  backend,
@@ -892,7 +897,7 @@ class DirectRandomizedBenchmarking(Benchmark):
892
897
  cast(dict[tuple[int, ...], list[Any]], sorted_transpiled_qc_list),
893
898
  )
894
899
  )
895
-
900
+ total_submit += time() - t_start
896
901
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")
897
902
 
898
903
  self.untranspiled_circuits.circuit_groups.append(
@@ -916,6 +921,7 @@ class DirectRandomizedBenchmarking(Benchmark):
916
921
  execution_results, time_retrieve = retrieve_all_counts(
917
922
  job_dict["jobs"], f"qubits_{str(qubits)}_depth_{str(depth)}"
918
923
  )
924
+ total_retrieve += time_retrieve
919
925
  # Retrieve all job meta data
920
926
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
921
927
  # Export all to dataset
@@ -934,7 +940,8 @@ class DirectRandomizedBenchmarking(Benchmark):
934
940
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)
935
941
 
936
942
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
937
-
943
+ dataset.attrs["total_submit_time"] = total_submit
944
+ dataset.attrs["total_retrieve_time"] = total_retrieve
938
945
  qcvv_logger.info(f"DRB experiment execution concluded!")
939
946
 
940
947
  return dataset
@@ -16,12 +16,13 @@
16
16
  Interleaved Clifford Randomized Benchmarking.
17
17
  """
18
18
 
19
- from time import strftime
19
+ from time import strftime, time
20
20
  from typing import Any, Dict, List, Literal, Optional, Sequence, Type
21
21
 
22
22
  from matplotlib.figure import Figure
23
23
  import numpy as np
24
24
  import xarray as xr
25
+ from pycparser.ply.ctokens import t_STRING
25
26
 
26
27
  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
27
28
  from iqm.benchmarks.benchmark_definition import (
@@ -323,6 +324,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
323
324
  # Submit jobs for all qubit layouts
324
325
  all_rb_jobs: Dict[str, List[Dict[str, Any]]] = {} # Label by Clifford or Interleaved
325
326
  time_circuit_generation: Dict[str, float] = {}
327
+ total_submit: float = 0
328
+ total_retrieve: float = 0
326
329
 
327
330
  # Initialize the variable to contain the circuits for each layout
328
331
 
@@ -398,6 +401,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
398
401
  sorted_transpiled_interleaved_rb_qc_list = {
399
402
  tuple(flat_qubits_array): parallel_transpiled_interleaved_rb_circuits[seq_length]
400
403
  }
404
+ t_start = time()
401
405
  all_rb_jobs["clifford"].append(
402
406
  submit_parallel_rb_job(
403
407
  backend,
@@ -422,6 +426,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
422
426
  self.configuration.max_circuits_per_batch,
423
427
  )
424
428
  )
429
+ total_submit += time() - t_start
425
430
  qcvv_logger.info(f"Both jobs for sequence length {seq_length} submitted successfully!")
426
431
 
427
432
  self.untranspiled_circuits.circuit_groups.append(
@@ -507,6 +512,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
507
512
  time_circuit_generation[str(qubits)] = t_clifford + t_inter
508
513
 
509
514
  # Submit Clifford then Interleaved
515
+ t_start = time()
510
516
  all_rb_jobs["clifford"].extend(
511
517
  submit_sequential_rb_jobs(
512
518
  qubits,
@@ -531,6 +537,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
531
537
  circuit_compilation_options=self.circuit_compilation_options,
532
538
  )
533
539
  )
540
+ total_submit += time() - t_start
534
541
  qcvv_logger.info(
535
542
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
536
543
  )
@@ -568,6 +575,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
568
575
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
569
576
  # Retrieve all job meta data
570
577
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
578
+ total_retrieve += time_retrieve
571
579
  # Export all to dataset
572
580
  dataset.attrs[qubit_idx[str(qubits)]].update(
573
581
  {
@@ -585,6 +593,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
585
593
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
586
594
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)
587
595
 
596
+ dataset.attrs["total_submit_time"] = total_submit
597
+ dataset.attrs["total_retrieve_time"] = total_retrieve
588
598
  qcvv_logger.info(f"Interleaved RB experiment concluded !")
589
599
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
590
600
 
@@ -2,7 +2,7 @@
2
2
  Mirror Randomized Benchmarking.
3
3
  """
4
4
 
5
- from time import strftime
5
+ from time import strftime, time
6
6
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type
7
7
  import warnings
8
8
 
@@ -592,6 +592,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
592
592
  # Submit jobs for all qubit layouts
593
593
  all_mrb_jobs: List[Dict[str, Any]] = []
594
594
  time_circuit_generation: Dict[str, float] = {}
595
+ total_submit: float = 0
596
+ total_retrieve: float = 0
595
597
 
596
598
  # The depths should be assigned to each set of qubits!
597
599
  # The real final MRB depths are twice the originally specified, must be taken into account here!
@@ -653,7 +655,9 @@ class MirrorRandomizedBenchmarking(Benchmark):
653
655
 
654
656
  # Submit
655
657
  sorted_transpiled_qc_list = {tuple(qubits): mrb_transpiled_circuits_lists[depth]}
658
+ t_start = time()
656
659
  all_mrb_jobs.append(self.submit_single_mrb_job(backend, qubits, depth, sorted_transpiled_qc_list))
660
+ total_submit += time() - t_start
657
661
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")
658
662
 
659
663
  self.untranspiled_circuits.circuit_groups.append(
@@ -675,6 +679,7 @@ class MirrorRandomizedBenchmarking(Benchmark):
675
679
  )
676
680
  # Retrieve all job meta data
677
681
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
682
+ total_retrieve += time_retrieve
678
683
  # Export all to dataset
679
684
  dataset.attrs[qubit_idx[str(qubits)]].update(
680
685
  {
@@ -690,6 +695,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
690
695
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
691
696
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)
692
697
 
698
+ dataset.attrs["total_submit_time"] = total_submit
699
+ dataset.attrs["total_retrieve_time"] = total_retrieve
693
700
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
694
701
 
695
702
  qcvv_logger.info(f"MRB experiment execution concluded !")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: iqm-benchmarks
3
- Version: 2.48
3
+ Version: 2.49
4
4
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
5
5
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
6
6
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes