iqm-benchmarks 2.48.tar.gz → 2.50.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/CHANGELOG.rst +8 -0
  2. {iqm_benchmarks-2.48/src/iqm_benchmarks.egg-info → iqm_benchmarks-2.50}/PKG-INFO +3 -3
  3. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/pyproject.toml +2 -2
  4. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/requirements.txt +107 -18
  5. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/coherence/coherence.py +12 -5
  6. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/compressive_gst/compressive_gst.py +14 -5
  7. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/entanglement/ghz.py +8 -2
  8. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/entanglement/graph_states.py +6 -3
  9. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/optimization/qscore.py +11 -4
  10. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/quantum_volume/quantum_volume.py +8 -3
  11. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +10 -1
  12. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py +10 -3
  13. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +11 -1
  14. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +8 -1
  15. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50/src/iqm_benchmarks.egg-info}/PKG-INFO +3 -3
  16. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm_benchmarks.egg-info/requires.txt +2 -2
  17. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/.github/workflows/main.yml +0 -0
  18. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/.github/workflows/publish.yml +0 -0
  19. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/.github/workflows/tag_and_release.yml +0 -0
  20. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/.gitignore +0 -0
  21. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/LICENSE +0 -0
  22. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/MANIFEST.in +0 -0
  23. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/README.md +0 -0
  24. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/benchmark_runner.py +0 -0
  25. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docbuild +0 -0
  26. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/API.rst +0 -0
  27. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/Makefile +0 -0
  28. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/_static/images/favicon.ico +0 -0
  29. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/_static/images/logo.png +0 -0
  30. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/_templates/autosummary-class-template.rst +0 -0
  31. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/_templates/autosummary-module-template.rst +0 -0
  32. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/changelog.rst +0 -0
  33. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/conf.py +0 -0
  34. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/development/development.rst +0 -0
  35. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/development/generate_2qubit_cliffords.ipynb +0 -0
  36. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/development/how_to_make_your_own_benchmark.ipynb +0 -0
  37. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/devices/devices.rst +0 -0
  38. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/devices/spark.ipynb +0 -0
  39. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/devices/star.ipynb +0 -0
  40. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_clifford_rb.ipynb +0 -0
  41. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_clops.ipynb +0 -0
  42. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_coherence.ipynb +0 -0
  43. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_eplg.ipynb +0 -0
  44. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_experiment_all.ipynb +0 -0
  45. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_ghz.ipynb +0 -0
  46. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_graphstate.ipynb +0 -0
  47. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_gst.ipynb +0 -0
  48. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_interleaved_rb.ipynb +0 -0
  49. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_mirror_rb.ipynb +0 -0
  50. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_qscore.ipynb +0 -0
  51. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/example_quantum_volume.ipynb +0 -0
  52. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/examples/examples.rst +0 -0
  53. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/index.rst +0 -0
  54. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/license.rst +0 -0
  55. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/docs/readme.md +0 -0
  56. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/format +0 -0
  57. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/scheduled_experiments/adonis/__init__.py +0 -0
  58. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/scheduled_experiments/adonis/weekly.py +0 -0
  59. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/setup.cfg +0 -0
  60. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/__init__.py +0 -0
  61. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/benchmark.py +0 -0
  62. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/benchmark_definition.py +0 -0
  63. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/circuit_containers.py +0 -0
  64. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/coherence/__init__.py +0 -0
  65. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/compressive_gst/__init__.py +0 -0
  66. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/compressive_gst/gst_analysis.py +0 -0
  67. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/entanglement/__init__.py +0 -0
  68. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/logging_config.py +0 -0
  69. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/optimization/__init__.py +0 -0
  70. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/quantum_volume/__init__.py +0 -0
  71. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/quantum_volume/clops.py +0 -0
  72. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/__init__.py +0 -0
  73. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl +0 -0
  74. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl +0 -0
  75. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py +0 -0
  76. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/direct_rb/__init__.py +0 -0
  77. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/eplg/__init__.py +0 -0
  78. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/eplg/eplg.py +0 -0
  79. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py +0 -0
  80. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py +0 -0
  81. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/multi_lmfit.py +0 -0
  82. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py +0 -0
  83. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/readout_mitigation.py +0 -0
  84. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/utils.py +0 -0
  85. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/utils_plots.py +0 -0
  86. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm/benchmarks/utils_shadows.py +0 -0
  87. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm_benchmarks.egg-info/SOURCES.txt +0 -0
  88. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm_benchmarks.egg-info/dependency_links.txt +0 -0
  89. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/iqm_benchmarks.egg-info/top_level.txt +0 -0
  90. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/LICENSE +0 -0
  91. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/README.md +0 -0
  92. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/additional_fns.py +0 -0
  93. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/algorithm.py +0 -0
  94. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/compatibility.py +0 -0
  95. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/low_level_jit.py +0 -0
  96. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/optimization.py +0 -0
  97. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/qiskit_interface.py +0 -0
  98. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/reporting/figure_gen.py +0 -0
  99. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/src/mGST/reporting/reporting.py +0 -0
  100. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tag-from-pipeline.sh +0 -0
  101. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/test +0 -0
  102. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_coherence.py +0 -0
  103. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_ghz.py +0 -0
  104. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_graph_states.py +0 -0
  105. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_gst.py +0 -0
  106. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_qscore.py +0 -0
  107. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_qv.py +0 -0
  108. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/test_rb.py +0 -0
  109. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/unit/test_backend_transpilation.py +0 -0
  110. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/unit/test_benchmark_circuit.py +0 -0
  111. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/tests/unit/test_submit_execute.py +0 -0
  112. {iqm_benchmarks-2.48 → iqm_benchmarks-2.50}/update-requirements.py +0 -0

CHANGELOG.rst
@@ -2,6 +2,14 @@
  Changelog
  =========

+ Version 2.50
+ ============
+ * Update iqm-client and iqm-station-control-client dependency versions as part of IQM OS 4.3 release.
+
+ Version 2.49
+ ============
+ * Added logging of execution time to all benchmarks.
+
  Version 2.48
  ============
  * Updated iqm-client and supported python versions.
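The per-benchmark diffs further down all implement the 2.49 entry the same way: the elapsed times already returned by the submit_execute and retrieve_all_counts helpers are accumulated into running totals and stored as dataset attributes. A minimal sketch of that pattern, assuming only those two helpers as they appear in the diffs (the run_with_timing wrapper and its batches argument are illustrative, not part of the package):

# Sketch of the execution-time logging pattern added in 2.49.
# submit_execute / retrieve_all_counts are the package helpers visible in the
# diffs below; this wrapper and its `batches` argument are hypothetical.
import xarray as xr

def run_with_timing(batches, submit_execute, retrieve_all_counts) -> xr.Dataset:
    """Accumulate submit/retrieve wall-clock times and record them on the dataset."""
    dataset = xr.Dataset()
    total_submit: float = 0
    total_retrieve: float = 0
    for batch in batches:
        # Both helpers return (result, elapsed_seconds), as the diffs below rely on.
        jobs, time_submit = submit_execute(batch)
        total_submit += time_submit
        counts, time_retrieve = retrieve_all_counts(jobs)
        total_retrieve += time_retrieve
        # ...counts are added to the dataset here in the real benchmarks...
    dataset.attrs["total_submit_time"] = total_submit
    dataset.attrs["total_retrieve_time"] = total_retrieve
    return dataset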

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: iqm-benchmarks
- Version: 2.48
+ Version: 2.50
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -19,8 +19,8 @@ Requires-Dist: networkx<4.0,>=3.3
  Requires-Dist: rustworkx>=0.16.0
  Requires-Dist: numpy<2.0,>=1.25.2
  Requires-Dist: qiskit<=1.4.2,>=1.2.4
- Requires-Dist: iqm-client[qiskit]<31.0,>=30.1.0
- Requires-Dist: iqm-station-control-client>=9.3.0
+ Requires-Dist: iqm-client[qiskit]<33.0,>=32.1.1
+ Requires-Dist: iqm-station-control-client<12.0,>=11.3.1
  Requires-Dist: requests<3.0,>=2.32.3
  Requires-Dist: scikit-optimize<0.11.0,>=0.10.2
  Requires-Dist: tabulate<1.0.0,>=0.9.0

pyproject.toml
@@ -44,8 +44,8 @@ dependencies = [
  "rustworkx>=0.16.0",
  "numpy >= 1.25.2, < 2.0",
  "qiskit >= 1.2.4, <= 1.4.2",
- "iqm-client[qiskit] >=30.1.0, <31.0",
- "iqm-station-control-client >= 9.3.0",
+ "iqm-client[qiskit] >=32.1.1, <33.0",
+ "iqm-station-control-client >= 11.3.1, <12.0",
  "requests>=2.32.3, < 3.0",
  "scikit-optimize >= 0.10.2, < 0.11.0",
  "tabulate >= 0.9.0, <1.0.0",

requirements.txt
@@ -824,18 +824,21 @@ ipykernel==6.29.5 \
  ipython==8.31.0 \
  --hash=sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6 \
  --hash=sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b
- iqm-client==30.2.0 \
- --hash=sha256:1bad4c4284fff640b4476e60a87bbf50ed34a41f23da129c4e207ecdf13148ed \
- --hash=sha256:2b34186ee140accace743bbc489ce0a1b65de836328ab9492e735b45e5389759
- iqm-data-definitions==2.13 \
- --hash=sha256:0de52e2a1ffdc63375b13825b28170d0868b12cda011e782b9348bb143083a0e \
- --hash=sha256:be6a71ab2e5b04c6e6c4d53094c89c088cc4b6e8e293f412f9fa937cf8dc980c
- iqm-exa-common==26.24.0 \
- --hash=sha256:92e880913589015b78baf8e0267f82cadc8a52cb090c8e7b5226d1d9a8ddbd26 \
- --hash=sha256:fc3edd89b501961b3ad034f4c37983bb879735626df4f342ea0edcab3bce8182
- iqm-station-control-client==9.18.0 \
- --hash=sha256:280aa5d150f034753428cfbcae6be57ecf44cded2af2c05b180b52b18a8b4693 \
- --hash=sha256:6425fbd787a3f28eac3529c7212cb795c1db3939edfb8473019f132021b9b11d
+ iqm-client==32.1.1 \
+ --hash=sha256:556f8bd5c19fa030c7f1795dcc5a15c93983baf5997a715785a332002846719d \
+ --hash=sha256:e0b52425822b05df4517b9194c3c1195c964627ab3c0e1ee90793e4ea1c40ce6
+ iqm-data-definitions==2.19 \
+ --hash=sha256:021699403ada0a326b69f705ea9f326d4caa69ca0e8ed84e186d9b3fd62f3edf \
+ --hash=sha256:248022127322a9c786143f8226716061b90231895274add8e6a5adc1f85d2409
+ iqm-exa-common==27.3.1 \
+ --hash=sha256:0bf0f3a47111b5feaa98694ea6b576ed56e3d7a29ee0b4987fb2068992205ae4 \
+ --hash=sha256:2cc552e318a0161e8151c50ba896c452c295bb60860dc6032044931226057cb2
+ iqm-pulse==12.6.1 \
+ --hash=sha256:1b909c6fff268df100e120c3d889acfaabd40d7b8e7444211a9beb984822db78 \
+ --hash=sha256:c61f7b58354682092cd985182c409c9fd7fa58cc4a39cfa0f871698b1502bf83
+ iqm-station-control-client==11.3.1 \
+ --hash=sha256:56b409da8f5107ae6a2687d33746400650b53965a21f3bd905ca06aa3bf97d39 \
+ --hash=sha256:69640fd95585b749ba1223b0fce817bec9474f0e05aef6fc90a3cec225565d1e
  isoduration==20.11.0 \
  --hash=sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9 \
  --hash=sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042
@@ -845,9 +848,9 @@ isort==5.13.2 \
  jedi==0.19.2 \
  --hash=sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0 \
  --hash=sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9
- jinja2==3.1.5 \
- --hash=sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb \
- --hash=sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb
+ jinja2==3.0.3 \
+ --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
+ --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
  joblib==1.4.2 \
  --hash=sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6 \
  --hash=sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e
@@ -1290,6 +1293,9 @@ numpy==1.26.4 \
  --hash=sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef \
  --hash=sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3 \
  --hash=sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f
+ numpy-typing-compat==20250818.1.25 \
+ --hash=sha256:4f91427369583074b236c804dd27559134f08ec4243485034c8e7d258cbd9cd3 \
+ --hash=sha256:8ff461725af0b436e9b0445d07712f1e6e3a97540a3542810f65f936dcc587a5
  opentelemetry-api==1.25.0 \
  --hash=sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737 \
  --hash=sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869
@@ -1314,6 +1320,9 @@ opentelemetry-sdk==1.25.0 \
  opentelemetry-semantic-conventions==0.46b0 \
  --hash=sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07 \
  --hash=sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa
+ optype==0.14.0 \
+ --hash=sha256:50d02edafd04edf2e5e27d6249760a51b2198adb9f6ffd778030b3d2806b026b \
+ --hash=sha256:925cf060b7d1337647f880401f6094321e7d8e837533b8e159b9a92afa3157c6
  orjson==3.10.14 \
  --hash=sha256:0293a88815e9bb5c90af4045f81ed364d982f955d12052d989d844d6c4e50945 \
  --hash=sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780 \
@@ -1838,6 +1847,80 @@ python-dotenv==0.21.1 \
  python-json-logger==3.2.1 \
  --hash=sha256:8eb0554ea17cb75b05d2848bc14fb02fbdbd9d6972120781b974380bfa162008 \
  --hash=sha256:cdc17047eb5374bd311e748b42f99d71223f3b0e186f4206cc5d52aefe85b090
+ python-rapidjson==1.20 \
+ --hash=sha256:00183c4938cd491b98b1a43626bc5a381842ceba87644cb91b25555f3fc3c0bf \
+ --hash=sha256:026077b663acf93a3f2b1adb87282e611a30214b8ae8001b7e4863a3b978e646 \
+ --hash=sha256:05e28c3dbb4a0d74ec13af9668ef2b9f302edf83cf7ce1d8316a95364720eec0 \
+ --hash=sha256:06ee7bcf660ebbdf1953aa7bf74214b722d934928c7b9f2a23b12e0713b61fa4 \
+ --hash=sha256:083df379c769b30f9bc40041c91fd9d8f7bb8ca2b3c7170258842aced2098e05 \
+ --hash=sha256:0c3f7085c52259c56af72462df7620c3b8bb95575fd9b8c3a073728855e93269 \
+ --hash=sha256:115f08c86d2df7543c02605e77c84727cdabc4b08310d2f097e953efeaaa73eb \
+ --hash=sha256:1446e902b6c781f271bf8556da636c1375cbb208e25f92e1af4cc2d92cf0cf15 \
+ --hash=sha256:1c0303bd445312a78485a9adba06dfdb84561c5157a9cda7999fefb36df4c6cc \
+ --hash=sha256:1fc3bba6632ecffeb1897fdf98858dc50a677237f4241853444c70a041158a90 \
+ --hash=sha256:225bd4cbabfe7910261cbcebb8b811d4ff98e90cdd17c233b916c6aa71a9553f \
+ --hash=sha256:303b079ef268a996242be51ae80c8b563ee2d73489ab4f16199fef2216e80765 \
+ --hash=sha256:328095d6d558090c29d24d889482b10dcc3ade3b77c93a61ea86794623046628 \
+ --hash=sha256:368ecdf4031abbde9c94aac40981d9a1238e6bcfef9fbfee441047b4757d6033 \
+ --hash=sha256:3a6620eed0b04196f37fab7048c1d672d03391bb29d7f09ee8fee8dea33f11f4 \
+ --hash=sha256:3e963e78fff6ab5ab2ae847b65683774c48b9b192307380f2175540d6423fd73 \
+ --hash=sha256:403e4986484f01f79fdce00b48c12a1b39d16e822cd37c60843ab26455ab0680 \
+ --hash=sha256:4099cb9eae8a0ce19c09e02729eb6d69d5180424f13a2641a6c407d053e47a82 \
+ --hash=sha256:425c2bb8e778a04497953482c251944b2736f61012d897f17b73da3eca060c27 \
+ --hash=sha256:4355bcfc8629d15f6246011b40e84cc368d842518a91adb15c5eba211305ee5b \
+ --hash=sha256:488d0c6155004b5177225eaf331bb1838616da05ae966dd24a7d442751c1d193 \
+ --hash=sha256:4c680cd2b4de760ff6875de71fe6a87bd610aa116593d62e4f81a563be86ae18 \
+ --hash=sha256:4f7cbbff9696ea01dd8a29502cb314471c9a5d4239f2f3b7e35b6adbde2cc620 \
+ --hash=sha256:599ab208ccf6172d6cfac1abe048c837e62612f91f97d198e32773c45346a0b4 \
+ --hash=sha256:5adcef7a27abafbb2b3d0b02c822dfd9b4b329769cb97810b7f9733e1fda0498 \
+ --hash=sha256:5d3be149ce5475f9605f01240487541057792abad94d3fd0cd56af363cf5a4dc \
+ --hash=sha256:5f55531c8197cb7a21a5ef0ffa46f2b8fc8c5fe7c6fd08bdbd2063ae65d2ff65 \
+ --hash=sha256:6056fcc8caeb9b04775bf655568bba362c7670ab792c1b438671bb056db954cd \
+ --hash=sha256:632acb2dfa29883723e24bb2ce47c726edd5f672341553a5184db68f78d3bd09 \
+ --hash=sha256:6355cb690bf64629767206524d4d00da909970d46d8fc0b367f339975e4eb419 \
+ --hash=sha256:69e702fe74fe8c44c6253bb91364a270dc49f704920c90e01040155bd600a5fd \
+ --hash=sha256:6c9f813a37d1f708a221f1f7d8c97c437d10597261810c1d3b52cf8f248d66c0 \
+ --hash=sha256:6cb3ad353ec083a6dcf0552f1fce3c490f92e2fccf9a81eac42835297a8431a1 \
+ --hash=sha256:7444bc7e6a04c03d6ed748b5dab0798fa2b3f2b303be8c38d3af405b2cac6d63 \
+ --hash=sha256:7d36aab758bfb1b59e0a849cd20e971eda951a04d3586bb5f6cb460bfc7c103d \
+ --hash=sha256:7dd9c5e661d17eafa44b2875f6ce55178cc87388575ce3cd3c606d5a33772b49 \
+ --hash=sha256:7f7b6574887d8828f34eb3384092d6e6c290e8fbb12703c409dbdde814612657 \
+ --hash=sha256:83a48f96d0abb8349a4d42f029259b755d8c6fd347f5de2d640e164c3f45e63b \
+ --hash=sha256:871f2eeb0907f3d7ab09efe04c5b5e2886c275ea568f7867c97468ae14cdd52f \
+ --hash=sha256:87aa0b01b8c20984844f1440b8ff6bdb32de911a1750fed344b9daed33b4b52b \
+ --hash=sha256:884e1dd4c0770ed424737941af4d5dc9014995f9c33595f151af13f83ce282c3 \
+ --hash=sha256:8fc52405435ce875aa000afa2637ea267eb0d4ab9622f9b97c92d92cb1a9c440 \
+ --hash=sha256:924f9ea302494d4a4d540d3509f8f1f15622ea7d614c6f29df3188d52c6cb546 \
+ --hash=sha256:9831430f17101a6a249e07db9c42d26c3263e6009450722cce0c14726421f434 \
+ --hash=sha256:9df543521fa4b69589c42772b2f32a6c334b3b5fc612cd6dc3705136d0788da3 \
+ --hash=sha256:9e431a7afc77aa874fed537c9f6bf5fcecaef124ebeae2a2379d3b9e9adce74b \
+ --hash=sha256:a2b624b3613fb7b8dfef4adc709bf39489be8c655cd9d24dc4e2cc16fc5def83 \
+ --hash=sha256:a5fb413414b92763a54d53b732df3c9de1b114012c8881a3d1215a19b9fca494 \
+ --hash=sha256:b0d07d4f0ebbb2228d5140463f11ac519147b9d791f7e40b3edf518a806be3cc \
+ --hash=sha256:b733978ecd84fc5df9a778ce821dc1f3113f7bfc2493cac0bb17efb4ae0bb8fa \
+ --hash=sha256:b7c0408e7f52f32cf4bdd5aa305f005914b0143cac69d42575e2d40e8678cd72 \
+ --hash=sha256:b9399ad75a2e3377f9e6208caabe73eb9354cd01b732407475ccadcd42c577df \
+ --hash=sha256:b9496b1e9d6247e8802ac559b7eebb5f3cae426d1c1dbde4049c63dff0941370 \
+ --hash=sha256:bc79d7f00f7538e027960ca6bcd1e03ed99fcf660d4d882d1c22f641155d0db0 \
+ --hash=sha256:bd978c7669cc844f669a48d2a6019fb9134a2385536f806fe265a1e374c3573a \
+ --hash=sha256:bef1eca712fb9fd5d2edd724dd1dd8a608215d6afcaee4f351b3e99e3f73f720 \
+ --hash=sha256:bf3c0e2a5b97b0d07311f15f0dce4434e43dec865c3794ad1b10d968460fd665 \
+ --hash=sha256:c05c8602c019cc0db19601fdc4927755a9d33f21d01beb3d5767313d7a81360d \
+ --hash=sha256:c2f85da53286e67778d4061ef32ff44ca9b5f945030463716e046ee8985319f8 \
+ --hash=sha256:c60121d155562dc694c05ed7df4e39e42ee1d3adff2a060c64a004498e6451f7 \
+ --hash=sha256:ce4cee141c924300cbedba1e5bea05b13484598d1e550afc5b50209ba73c62f2 \
+ --hash=sha256:d87041448cec00e2db5d858625a76dc1b59eef6691a039acff6d92ad8581cfc1 \
+ --hash=sha256:daee815b4c20ca6e4dbc6bde373dd3f65b53813d775f1c94b765b33b402513a7 \
+ --hash=sha256:ddb63eff401ce7cf20cdd5e21942fc23fbe0e1dc1d96d7ae838645fb1f74fb47 \
+ --hash=sha256:e3f89a58d7709d5879586e9dbfd11be76a799e8fbdbb5eddaffaeba9b572fba3 \
+ --hash=sha256:e5774c905034362298312116f9b58c181e91a09800e4e5cede7b3d460a6a9fde \
+ --hash=sha256:e8064b8edb57ddd9e3ffa539cf2ec2f03515751fb0698b40ba5cb66a2123af19 \
+ --hash=sha256:ec17a18df700e1f956fc5a0c41cbb3cc746c44c0fef38988efba9b2cb607ecfa \
+ --hash=sha256:eeaa8487fdd8db409bd2e0c41c59cee3b9f1d08401fc75520f7d35c7a22d8789 \
+ --hash=sha256:f510ffe32fec319699f0c1ea9cee5bde47c33202b034b85c5d1b9ace682aa96a \
+ --hash=sha256:f974c4e11be833221062fc4c3129bed172082792b33ef9fc1b8104f49c514f1d \
+ --hash=sha256:fbff5caf127c5bed4d6620f95a039dd9e293784d844af50782aaf278a743acb4 \
+ --hash=sha256:fc7a095f77eb3bb6acff94acf868a100faaf06028c4b513428f161cd55030476
  pytz==2024.2 \
  --hash=sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a \
  --hash=sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725
@@ -2382,6 +2465,9 @@ scipy==1.15.1 \
  --hash=sha256:ce3a000cd28b4430426db2ca44d96636f701ed12e2b3ca1f2b1dd7abdd84b39a \
  --hash=sha256:f735bc41bd1c792c96bc426dece66c8723283695f02df61dcc4d0a707a42fc54 \
  --hash=sha256:f82fcf4e5b377f819542fbc8541f7b5fbcf1c0017d0df0bc22c781bf60abc4d8
+ scipy-stubs==1.16.3.0 \
+ --hash=sha256:90e5d82ced2183ef3c5c0a28a77df8cc227458624364fa0ff975ad24fa89d6ad \
+ --hash=sha256:d6943c085e47a1ed431309f9ca582b6a206a9db808a036132a0bf01ebc34b506
  scs==3.2.7.post2 \
  --hash=sha256:0427a5bf9aa43eb2a22083e1a43412db5054a88d695fdaa6018cd6fb3a9f0203 \
  --hash=sha256:05da761821a14b8ebe54427510cbe10722b3433db4e953acd7a067893e955781 \
@@ -2429,9 +2515,9 @@ send2trash==1.8.3 \
  setuptools==75.8.0 \
  --hash=sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6 \
  --hash=sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3
- six==1.17.0 \
- --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \
- --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81
+ six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
  sniffio==1.3.1 \
  --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \
  --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc
@@ -2653,6 +2739,9 @@ types-python-dateutil==2.9.0.20241206 \
  types-requests==2.32.4.20250913 \
  --hash=sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1 \
  --hash=sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d
+ types-six==1.17.0.20251009 \
+ --hash=sha256:2494f4c2a58ada0edfe01ea84b58468732e43394c572d9cf5b1dd06d86c487a3 \
+ --hash=sha256:efe03064ecd0ffb0f7afe133990a2398d8493d8d1c1cc10ff3dfe476d57ba44f
  types-tqdm==4.67.0.20250516 \
  --hash=sha256:1dd9b2c65273f2342f37e5179bc6982df86b6669b3376efc12aef0a29e35d36d \
  --hash=sha256:230ccab8a332d34f193fc007eb132a6ef54b4512452e718bf21ae0a7caeb5a6b
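Taken together with the constraint changes above, a quick way to sanity-check an upgraded environment is to read back the installed distribution versions. A small sketch using only the standard library; the distribution names are taken from the requirement lines above, and the snippet itself is illustrative rather than part of the package:

# Sketch: confirm an environment matches the 2.50 dependency bumps listed above.
from importlib.metadata import version

for dist, expected in [
    ("iqm-benchmarks", "2.50"),
    ("iqm-client", ">=32.1.1, <33.0"),
    ("iqm-station-control-client", ">=11.3.1, <12.0"),
]:
    print(f"{dist}: installed {version(dist)}, expected {expected}")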

src/iqm/benchmarks/coherence/coherence.py
@@ -422,6 +422,8 @@ class CoherenceBenchmark(Benchmark):
  ) -> xr.Dataset:
  """Executes the benchmark."""
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)
@@ -457,7 +459,7 @@ class CoherenceBenchmark(Benchmark):
  qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
  qcvv_logger.setLevel(logging.WARNING)

- jobs, _ = submit_execute(
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,
@@ -466,9 +468,11 @@ class CoherenceBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit

  qcvv_logger.setLevel(logging.INFO)
- execution_results = retrieve_all_counts(jobs)[0]
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
+ total_retrieve += time_retrieve
  identifier = BenchmarkObservationIdentifier(qubit_set)
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
  dataset.attrs.update(
@@ -494,8 +498,7 @@ class CoherenceBenchmark(Benchmark):
  # Execute on the backend
  if self.configuration.use_dd is True:
  raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")
-
- jobs, _ = submit_execute(
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,
@@ -504,8 +507,10 @@ class CoherenceBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  qcvv_logger.setLevel(logging.INFO)
- execution_results = retrieve_all_counts(jobs)[0]
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
+ total_retrieve += time_retrieve
  identifier = BenchmarkObservationIdentifier(group)
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)

@@ -524,6 +529,8 @@ class CoherenceBenchmark(Benchmark):
  self.transpiled_circuits.circuit_groups.append(
  CircuitGroup(name=self.coherence_exp, circuits=transpiled_qc_list)
  )
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve

  return dataset


src/iqm/benchmarks/compressive_gst/compressive_gst.py
@@ -32,6 +32,7 @@ from typing import Any, Dict, List, Tuple, Type, Union
  import numpy as np
  from qiskit.circuit.library import CZGate, RGate
  import xarray as xr
+ from time import strftime

  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
  from iqm.benchmarks.benchmark_definition import Benchmark, BenchmarkObservationIdentifier, add_counts_to_dataset
@@ -242,7 +243,9 @@ class CompressiveGST(Benchmark):
  """
  The main GST execution routine
  """
-
+ self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0
  dataset = xr.Dataset()
  qcvv_logger.info(f"Generating {self.configuration.num_circuits} random GST circuits")

@@ -255,7 +258,7 @@ class CompressiveGST(Benchmark):
  transpiled_circuit_dict = {
  tuple(range(self.backend.num_qubits)): transpiled_circuits[str(self.qubit_layouts[0])].circuits
  }
- all_jobs_parallel, _ = submit_execute(
+ all_jobs_parallel, time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.configuration.shots,
@@ -264,15 +267,17 @@ class CompressiveGST(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  # Retrieve
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
- counts, _ = retrieve_all_counts(all_jobs_parallel)
+ counts, time_retrieve = retrieve_all_counts(all_jobs_parallel)
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, f"parallel_results", dataset)
  else:
  all_jobs: Dict = {}
  for qubit_layout in self.qubit_layouts:
  transpiled_circuit_dict = {tuple(qubit_layout): transpiled_circuits[str(qubit_layout)].circuits}
- all_jobs[str(qubit_layout)], _ = submit_execute(
+ all_jobs[str(qubit_layout)], time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.configuration.shots,
@@ -280,14 +285,18 @@ class CompressiveGST(Benchmark):
  max_gates_per_batch=self.configuration.max_gates_per_batch,
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  )
+ total_submit += time_submit
  # Retrieve all
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
  for qubit_layout in self.qubit_layouts:
- counts, _ = retrieve_all_counts(all_jobs[str(qubit_layout)])
+ counts, time_retrieve = retrieve_all_counts(all_jobs[str(qubit_layout)])
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, str(qubit_layout), dataset)

  self.circuits.benchmark_circuits = [transpiled_circuits, untranspiled_circuits]
  self.add_configuration_to_dataset(dataset)
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"Run completed")
  return dataset


src/iqm/benchmarks/entanglement/ghz.py
@@ -828,6 +828,8 @@ class GHZBenchmark(Benchmark):
  Executes the benchmark.
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0
  aux_custom_qubits_array = cast(List[List[int]], self.custom_qubits_array).copy()
  dataset = xr.Dataset()

@@ -844,7 +846,7 @@ class GHZBenchmark(Benchmark):
  qubit_count = len(qubit_layout)
  circuit_group: CircuitGroup = self.generate_readout_circuit(qubit_layout, qubit_count)
  transpiled_circuit_dict = {tuple(qubit_layout): circuit_group.circuits}
- all_jobs[idx], _ = submit_execute(
+ all_jobs[idx], time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.shots,
@@ -853,6 +855,7 @@ class GHZBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit

  # Retrieve all
  for qubit_layout in aux_custom_qubits_array:
@@ -860,7 +863,8 @@ class GHZBenchmark(Benchmark):
  Id = BenchmarkObservationIdentifier(qubit_layout)
  idx = Id.string_identifier
  qubit_count = len(qubit_layout)
- counts, _ = retrieve_all_counts(all_jobs[idx])
+ counts, time_retrieve = retrieve_all_counts(all_jobs[idx])
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, idx, dataset)
  if self.rem:
  qcvv_logger.info(f"Applying readout error mitigation")
@@ -870,6 +874,8 @@ class GHZBenchmark(Benchmark):
  dataset, _ = add_counts_to_dataset(rem_results_dist, f"{idx}_rem", dataset)

  self.add_configuration_to_dataset(dataset)
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  return dataset


src/iqm/benchmarks/entanglement/graph_states.py
@@ -1151,6 +1151,8 @@ class GraphStateBenchmark(Benchmark):
  Executes the benchmark.
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)
@@ -1280,7 +1282,7 @@ class GraphStateBenchmark(Benchmark):
  graph_jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list, backend, self.shots, self.calset_id, self.max_gates_per_batch
  )
-
+ total_submit += time_submit
  all_graph_submit_results.append(
  {
  "unprojected_qubits": unprojected_qubits[idx],
@@ -1295,7 +1297,7 @@ class GraphStateBenchmark(Benchmark):
  unprojected_qubits = job_dict["unprojected_qubits"]
  # Retrieve counts
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier=str(unprojected_qubits))
-
+ total_retrieve += time_retrieve
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])

@@ -1320,7 +1322,8 @@ class GraphStateBenchmark(Benchmark):
  # if self.rem: TODO: add REM functionality

  qcvv_logger.info(f"Graph State benchmark experiment execution concluded !")
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  return dataset


src/iqm/benchmarks/optimization/qscore.py
@@ -741,6 +741,8 @@ class QScoreBenchmark(Benchmark):
  ) -> xr.Dataset:
  """Executes the benchmark."""
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)
@@ -874,7 +876,7 @@ class QScoreBenchmark(Benchmark):

  sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc}
  # Execute on the backend
- jobs, _ = submit_execute(
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,
@@ -883,13 +885,15 @@ class QScoreBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  qc_transpiled_list.append(transpiled_qc)
  qcvv_logger.setLevel(logging.INFO)
  instance_with_edges = set(range(self.num_instances)) - set(no_edge_instances)
  num_instances_with_edges = len(instance_with_edges)
  if self.REM:
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
  rem_counts = apply_readout_error_mitigation(
- backend, transpiled_qc, retrieve_all_counts(jobs)[0], self.mit_shots
+ backend, transpiled_qc, counts_retrieved, self.mit_shots
  )
  execution_results.extend(
  rem_counts[0][instance].nearest_probability_distribution()
@@ -897,8 +901,9 @@ class QScoreBenchmark(Benchmark):
  )
  # execution_results.append(rem_distribution)
  else:
- execution_results.extend(retrieve_all_counts(jobs)[0])
-
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
+ execution_results.extend(counts_retrieved)
+ total_retrieve += time_retrieve
  dataset.attrs.update(
  {
  num_nodes: {
@@ -921,6 +926,8 @@ class QScoreBenchmark(Benchmark):
  )

  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve

  return dataset


src/iqm/benchmarks/quantum_volume/quantum_volume.py
@@ -17,7 +17,7 @@ Quantum Volume benchmark
  """

  from copy import deepcopy
- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type

  from matplotlib.figure import Figure
@@ -716,6 +716,8 @@ class QuantumVolumeBenchmark(Benchmark):
  """Executes the benchmark."""

  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)
@@ -780,7 +782,9 @@ class QuantumVolumeBenchmark(Benchmark):
  all_op_counts[str(qubits)] = count_native_gates(backend, transpiled_qc_list)

  # Submit
+ t_start = time()
  all_qv_jobs.append(self.submit_single_qv_job(backend, qubits, sorted_transpiled_qc_list))
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} submitted successfully!")

  # Retrieve counts of jobs for all qubit layouts
@@ -791,7 +795,7 @@ class QuantumVolumeBenchmark(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], str(qubits))
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
-
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs.update(
  {
@@ -829,7 +833,8 @@ class QuantumVolumeBenchmark(Benchmark):
  self.mit_shots,
  )
  dataset.attrs.update({"REM_quasidistributions": rem_quasidistros})
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"QV experiment execution concluded !")
  return dataset


src/iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py
@@ -16,7 +16,7 @@
  'Standard' Clifford Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Sequence, Type

  import numpy as np
@@ -252,6 +252,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_rb_jobs: List[Dict[str, Any]] = []
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # Initialize the variable to contain the circuits for each layout
  self.untranspiled_circuits = BenchmarkCircuit("untranspiled_circuits")
@@ -290,6 +292,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  # Submit all
  flat_qubits_array = [x for y in self.qubits_array for x in y]
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_transpiled_rb_circuits[seq_length]}
+ t_start = time()
  all_rb_jobs.append(
  submit_parallel_rb_job(
  backend,
@@ -302,6 +305,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for sequence length {seq_length} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(
@@ -351,6 +355,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  )

  # Submit
+ t_start = time()
  all_rb_jobs.extend(
  submit_sequential_rb_jobs(
  qubits,
@@ -363,6 +368,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  circuit_compilation_options=self.circuit_compilation_options,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
  )
@@ -386,6 +392,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {
@@ -401,6 +408,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"RB experiment concluded !")
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])


src/iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py
@@ -3,7 +3,7 @@ Direct Randomized Benchmarking.
  """

  import random
- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type, cast

  import numpy as np
@@ -751,6 +751,8 @@ class DirectRandomizedBenchmarking(Benchmark):
  xr.Dataset: Dataset containing benchmark results and metadata
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()

@@ -816,6 +818,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  # Submit all
  flat_qubits_array = [x for y in loop_qubits_sequence for x in y]
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_drb_circuits[depth]["transpiled"]}
+ t_start = time()
  all_drb_jobs.append(
  submit_parallel_rb_job(
  backend,
@@ -828,6 +831,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(
@@ -884,6 +888,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  sorted_transpiled_qc_list = {
  cast(Tuple[int, ...], tuple(qubits)): drb_transpiled_circuits_lists[depth]
  }
+ t_start = time()
  all_drb_jobs.append(
  self.submit_single_drb_job(
  backend,
@@ -892,7 +897,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  cast(dict[tuple[int, ...], list[Any]], sorted_transpiled_qc_list),
  )
  )
-
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(
@@ -916,6 +921,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(
  job_dict["jobs"], f"qubits_{str(qubits)}_depth_{str(depth)}"
  )
+ total_retrieve += time_retrieve
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
  # Export all to dataset
@@ -934,7 +940,8 @@ class DirectRandomizedBenchmarking(Benchmark):
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)

  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"DRB experiment execution concluded!")

  return dataset
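Where the submission helper does not report its own duration (the Quantum Volume and randomized-benchmarking diffs above, and the interleaved/mirror RB diffs below), the change instead wraps the call in a plain time() stopwatch. A condensed sketch of that variant; submit_jobs stands in for helpers such as submit_parallel_rb_job or submit_single_qv_job, and the surrounding loop is illustrative only:

# Sketch of the stopwatch variant used when the submit helper returns no duration.
from time import time

def submit_all_with_timing(circuit_batches, submit_jobs):
    """Submit every batch, accumulating the wall-clock time spent in submission."""
    all_jobs = []
    total_submit: float = 0
    for batch in circuit_batches:
        t_start = time()
        all_jobs.append(submit_jobs(batch))  # e.g. submit_parallel_rb_job(...) in the diffs
        total_submit += time() - t_start     # same accumulation as in the diffs above
    return all_jobs, total_submit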

src/iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py
@@ -16,12 +16,13 @@
  Interleaved Clifford Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Type

  from matplotlib.figure import Figure
  import numpy as np
  import xarray as xr
+ from pycparser.ply.ctokens import t_STRING

  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
  from iqm.benchmarks.benchmark_definition import (
@@ -323,6 +324,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_rb_jobs: Dict[str, List[Dict[str, Any]]] = {} # Label by Clifford or Interleaved
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # Initialize the variable to contain the circuits for each layout

@@ -398,6 +401,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  sorted_transpiled_interleaved_rb_qc_list = {
  tuple(flat_qubits_array): parallel_transpiled_interleaved_rb_circuits[seq_length]
  }
+ t_start = time()
  all_rb_jobs["clifford"].append(
  submit_parallel_rb_job(
  backend,
@@ -422,6 +426,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Both jobs for sequence length {seq_length} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(
@@ -507,6 +512,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  time_circuit_generation[str(qubits)] = t_clifford + t_inter

  # Submit Clifford then Interleaved
+ t_start = time()
  all_rb_jobs["clifford"].extend(
  submit_sequential_rb_jobs(
  qubits,
@@ -531,6 +537,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  circuit_compilation_options=self.circuit_compilation_options,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
  )
@@ -568,6 +575,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {
@@ -585,6 +593,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"Interleaved RB experiment concluded !")
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])


src/iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py
@@ -2,7 +2,7 @@
  Mirror Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type
  import warnings

@@ -592,6 +592,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_mrb_jobs: List[Dict[str, Any]] = []
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # The depths should be assigned to each set of qubits!
  # The real final MRB depths are twice the originally specified, must be taken into account here!
@@ -653,7 +655,9 @@ class MirrorRandomizedBenchmarking(Benchmark):

  # Submit
  sorted_transpiled_qc_list = {tuple(qubits): mrb_transpiled_circuits_lists[depth]}
+ t_start = time()
  all_mrb_jobs.append(self.submit_single_mrb_job(backend, qubits, depth, sorted_transpiled_qc_list))
+ total_retrieve += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(
@@ -675,6 +679,7 @@ class MirrorRandomizedBenchmarking(Benchmark):
  )
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {
@@ -690,6 +695,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])

  qcvv_logger.info(f"MRB experiment execution concluded !")

src/iqm_benchmarks.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: iqm-benchmarks
- Version: 2.48
+ Version: 2.50
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -19,8 +19,8 @@ Requires-Dist: networkx<4.0,>=3.3
  Requires-Dist: rustworkx>=0.16.0
  Requires-Dist: numpy<2.0,>=1.25.2
  Requires-Dist: qiskit<=1.4.2,>=1.2.4
- Requires-Dist: iqm-client[qiskit]<31.0,>=30.1.0
- Requires-Dist: iqm-station-control-client>=9.3.0
+ Requires-Dist: iqm-client[qiskit]<33.0,>=32.1.1
+ Requires-Dist: iqm-station-control-client<12.0,>=11.3.1
  Requires-Dist: requests<3.0,>=2.32.3
  Requires-Dist: scikit-optimize<0.11.0,>=0.10.2
  Requires-Dist: tabulate<1.0.0,>=0.9.0

src/iqm_benchmarks.egg-info/requires.txt
@@ -6,8 +6,8 @@ networkx<4.0,>=3.3
  rustworkx>=0.16.0
  numpy<2.0,>=1.25.2
  qiskit<=1.4.2,>=1.2.4
- iqm-client[qiskit]<31.0,>=30.1.0
- iqm-station-control-client>=9.3.0
+ iqm-client[qiskit]<33.0,>=32.1.1
+ iqm-station-control-client<12.0,>=11.3.1
  requests<3.0,>=2.32.3
  scikit-optimize<0.11.0,>=0.10.2
  tabulate<1.0.0,>=0.9.0