qoro-divi 0.2.0b1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. divi/__init__.py +1 -2
  2. divi/backends/__init__.py +9 -0
  3. divi/backends/_circuit_runner.py +70 -0
  4. divi/backends/_execution_result.py +70 -0
  5. divi/backends/_parallel_simulator.py +486 -0
  6. divi/backends/_qoro_service.py +663 -0
  7. divi/backends/_qpu_system.py +101 -0
  8. divi/backends/_results_processing.py +133 -0
  9. divi/circuits/__init__.py +8 -0
  10. divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
  11. divi/circuits/_cirq/_parser.py +110 -0
  12. divi/circuits/_cirq/_qasm_export.py +78 -0
  13. divi/circuits/_core.py +369 -0
  14. divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
  15. divi/circuits/_qasm_validation.py +694 -0
  16. divi/qprog/__init__.py +24 -6
  17. divi/qprog/_expectation.py +181 -0
  18. divi/qprog/_hamiltonians.py +281 -0
  19. divi/qprog/algorithms/__init__.py +14 -0
  20. divi/qprog/algorithms/_ansatze.py +356 -0
  21. divi/qprog/algorithms/_qaoa.py +572 -0
  22. divi/qprog/algorithms/_vqe.py +249 -0
  23. divi/qprog/batch.py +383 -73
  24. divi/qprog/checkpointing.py +556 -0
  25. divi/qprog/exceptions.py +9 -0
  26. divi/qprog/optimizers.py +1014 -43
  27. divi/qprog/quantum_program.py +231 -413
  28. divi/qprog/variational_quantum_algorithm.py +995 -0
  29. divi/qprog/workflows/__init__.py +10 -0
  30. divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
  31. divi/qprog/workflows/_qubo_partitioning.py +220 -0
  32. divi/qprog/workflows/_vqe_sweep.py +560 -0
  33. divi/reporting/__init__.py +7 -0
  34. divi/reporting/_pbar.py +127 -0
  35. divi/reporting/_qlogger.py +68 -0
  36. divi/reporting/_reporter.py +133 -0
  37. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/METADATA +43 -15
  38. qoro_divi-0.5.0.dist-info/RECORD +43 -0
  39. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/WHEEL +1 -1
  40. qoro_divi-0.5.0.dist-info/licenses/LICENSES/.license-header +3 -0
  41. divi/_pbar.py +0 -73
  42. divi/circuits.py +0 -139
  43. divi/exp/cirq/_lexer.py +0 -126
  44. divi/exp/cirq/_parser.py +0 -889
  45. divi/exp/cirq/_qasm_export.py +0 -37
  46. divi/exp/cirq/_qasm_import.py +0 -35
  47. divi/exp/cirq/exception.py +0 -21
  48. divi/exp/scipy/_cobyla.py +0 -342
  49. divi/exp/scipy/pyprima/LICENCE.txt +0 -28
  50. divi/exp/scipy/pyprima/__init__.py +0 -263
  51. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  52. divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
  53. divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
  54. divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
  55. divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
  56. divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
  57. divi/exp/scipy/pyprima/cobyla/update.py +0 -331
  58. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  59. divi/exp/scipy/pyprima/common/_bounds.py +0 -41
  60. divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
  61. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
  62. divi/exp/scipy/pyprima/common/_project.py +0 -224
  63. divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
  64. divi/exp/scipy/pyprima/common/consts.py +0 -48
  65. divi/exp/scipy/pyprima/common/evaluate.py +0 -101
  66. divi/exp/scipy/pyprima/common/history.py +0 -39
  67. divi/exp/scipy/pyprima/common/infos.py +0 -30
  68. divi/exp/scipy/pyprima/common/linalg.py +0 -452
  69. divi/exp/scipy/pyprima/common/message.py +0 -336
  70. divi/exp/scipy/pyprima/common/powalg.py +0 -131
  71. divi/exp/scipy/pyprima/common/preproc.py +0 -393
  72. divi/exp/scipy/pyprima/common/present.py +0 -5
  73. divi/exp/scipy/pyprima/common/ratio.py +0 -56
  74. divi/exp/scipy/pyprima/common/redrho.py +0 -49
  75. divi/exp/scipy/pyprima/common/selectx.py +0 -346
  76. divi/interfaces.py +0 -25
  77. divi/parallel_simulator.py +0 -258
  78. divi/qlogger.py +0 -119
  79. divi/qoro_service.py +0 -343
  80. divi/qprog/_mlae.py +0 -182
  81. divi/qprog/_qaoa.py +0 -440
  82. divi/qprog/_vqe.py +0 -275
  83. divi/qprog/_vqe_sweep.py +0 -144
  84. divi/utils.py +0 -116
  85. qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
  86. /divi/{qem.py → circuits/qem.py} +0 -0
  87. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSE +0 -0
  88. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
divi/__init__.py CHANGED
@@ -2,7 +2,6 @@
  #
  # SPDX-License-Identifier: Apache-2.0

- from .qlogger import enable_logging
- from .qoro_service import QoroService
+ from .reporting import enable_logging

  enable_logging()
divi/backends/__init__.py ADDED
@@ -0,0 +1,9 @@
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from ._circuit_runner import CircuitRunner
+ from ._execution_result import ExecutionResult
+ from ._parallel_simulator import ParallelSimulator
+ from ._qoro_service import JobConfig, JobStatus, JobType, QoroService
+ from ._results_processing import convert_counts_to_probs, reverse_dict_endianness
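For orientation only (not part of the diff): judging by the re-exports removed from divi/__init__.py above and the new divi.backends package shown here, user-facing imports appear to move under divi.backends in 0.5.0. A hedged sketch of the presumed migration:

# Presumed import migration, inferred from the two __init__ diffs above.
# 0.2.0b1 exposed the service at the top level:
#   from divi import QoroService
# 0.5.0 appears to expose it from the backends package instead:
from divi.backends import ParallelSimulator, QoroService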
divi/backends/_circuit_runner.py ADDED
@@ -0,0 +1,70 @@
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from abc import ABC, abstractmethod
+
+ from divi.backends._execution_result import ExecutionResult
+
+
+ class CircuitRunner(ABC):
+     """
+     A generic interface for anything that can "run" quantum circuits.
+     """
+
+     def __init__(self, shots: int):
+         if shots <= 0:
+             raise ValueError(f"Shots must be a positive integer. Got {shots}.")
+
+         self._shots = shots
+
+     @property
+     def shots(self):
+         """
+         Get the number of measurement shots for circuit execution.
+
+         Returns:
+             int: Number of shots configured for this runner.
+         """
+         return self._shots
+
+     @property
+     @abstractmethod
+     def supports_expval(self) -> bool:
+         """
+         Whether the backend supports expectation value measurements.
+         """
+         return False
+
+     @property
+     @abstractmethod
+     def is_async(self) -> bool:
+         """
+         Whether the backend executes circuits asynchronously.
+
+         Returns:
+             bool: True if the backend returns a job ID and requires polling
+                 for results (e.g., QoroService). False if the backend
+                 returns results immediately (e.g., ParallelSimulator).
+         """
+         return False
+
+     @abstractmethod
+     def submit_circuits(self, circuits: dict[str, str], **kwargs) -> ExecutionResult:
+         """
+         Submit quantum circuits for execution.
+
+         This abstract method must be implemented by subclasses to define how
+         circuits are executed on their respective backends (simulator, hardware, etc.).
+
+         Args:
+             circuits (dict[str, str]): Dictionary mapping circuit labels to their
+                 OpenQASM string representations.
+             **kwargs: Additional backend-specific parameters for circuit execution.
+
+         Returns:
+             ExecutionResult: For synchronous backends, contains results directly.
+                 For asynchronous backends, contains a job_id that can be used to
+                 fetch results later.
+         """
+         pass
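To illustrate the interface just added, here is a minimal sketch of a synchronous CircuitRunner subclass. The class name EchoRunner and its canned counts are hypothetical and do not appear in the package; the sketch only shows which members a concrete runner must implement.

# Hypothetical example, not part of the package: a minimal synchronous
# CircuitRunner that returns dummy counts for every submitted circuit.
from divi.backends import CircuitRunner, ExecutionResult


class EchoRunner(CircuitRunner):
    @property
    def supports_expval(self) -> bool:
        return False

    @property
    def is_async(self) -> bool:
        return False

    def submit_circuits(self, circuits: dict[str, str], **kwargs) -> ExecutionResult:
        # Return the same dummy counts for each label, using the configured shot count.
        results = [{"label": label, "results": {"0": self.shots}} for label in circuits]
        return ExecutionResult(results=results)


runner = EchoRunner(shots=100)
print(runner.submit_circuits({"c0": "OPENQASM 2.0; ..."}).results)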
divi/backends/_execution_result.py ADDED
@@ -0,0 +1,70 @@
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from dataclasses import dataclass, replace
+
+
+ @dataclass(frozen=True)
+ class ExecutionResult:
+     """Result container for circuit execution.
+
+     This class provides a unified return type for all CircuitRunner.submit_circuits()
+     methods. For synchronous backends, it contains the results directly. For
+     asynchronous backends, it contains the job_id that can be used to fetch results later.
+
+     The class is frozen (immutable) to ensure data integrity. Use the `with_results()`
+     method to create a new instance with results populated from an async ExecutionResult.
+
+     Attributes:
+         results (list[dict] | None): For sync backends or after fetching: List of result
+             dictionaries, each containing 'label' and 'results' keys. Format:
+             [{"label": str, "results": dict}, ...]
+         job_id (str | None): For async backends: Job identifier that can be used
+             to poll and retrieve results from the backend.
+
+     Examples:
+         >>> # Synchronous backend
+         >>> result = ExecutionResult(results=[{"label": "circuit_0", "results": {"00": 100}}])
+         >>> result.is_async()
+         False
+
+         >>> # Asynchronous backend
+         >>> result = ExecutionResult(job_id="job-12345")
+         >>> result.is_async()
+         True
+         >>> # After fetching results
+         >>> result = backend.get_job_results(result)
+         >>> result.results is not None
+         True
+     """
+
+     results: list[dict] | None = None
+     """Results for synchronous backends: [{"label": str, "results": dict}, ...]"""
+
+     job_id: str | None = None
+     """Job identifier for asynchronous backends."""
+
+     def is_async(self) -> bool:
+         """Check if this result represents an async job.
+
+         Returns:
+             bool: True if job_id is not None and results are None (async backend),
+                 False otherwise (sync backend or results already fetched).
+         """
+         return self.job_id is not None and self.results is None
+
+     def with_results(self, results: list[dict]) -> "ExecutionResult":
+         """Create a new ExecutionResult with results populated.
+
+         This method creates a new instance with results set, effectively converting
+         an async ExecutionResult to a completed one.
+
+         Args:
+             results: The job results to populate.
+
+         Returns:
+             ExecutionResult: A new ExecutionResult instance with results populated
+             and job_id preserved.
+         """
+         return replace(self, results=results)
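The sync/async handling pattern described in the docstring above can be sketched as follows. The `backend` object and its `get_job_results` call are taken from the docstring example (presumably the QoroService runner, not shown in this section) and are assumptions here, not verified API.

# Sketch of the pattern described in the ExecutionResult docstring.
result = backend.submit_circuits(circuits)
if result.is_async():
    # Asynchronous backend: poll the job and obtain a completed copy.
    result = backend.get_job_results(result)  # assumed fetch call from the docstring
assert result.results is not None
for entry in result.results:
    print(entry["label"], entry["results"])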
divi/backends/_parallel_simulator.py ADDED
@@ -0,0 +1,486 @@
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import bisect
+ import heapq
+ import logging
+ import os
+ import threading
+ from functools import partial
+ from multiprocessing import Pool, current_process
+ from typing import Any, Literal
+ from warnings import warn
+
+ from qiskit import QuantumCircuit, transpile
+ from qiskit.converters import circuit_to_dag
+ from qiskit.dagcircuit import DAGOpNode
+ from qiskit.providers import Backend
+ from qiskit_aer import AerSimulator
+ from qiskit_aer.noise import NoiseModel
+
+ from divi.backends import CircuitRunner
+ from divi.backends._execution_result import ExecutionResult
+
+ logger = logging.getLogger(__name__)
+
+ # Suppress stevedore extension loading errors (harmless Qiskit v2/provider issue)
+ _stevedore_logger = logging.getLogger("stevedore.extension")
+ _stevedore_logger.setLevel(logging.CRITICAL)
+
+ # Lazy-loaded fake backends dictionary
+ _FAKE_BACKENDS_CACHE: dict[int, list] | None = None
+
+
+ def _load_fake_backends() -> dict[int, list]:
+     """Lazy load and return the FAKE_BACKENDS dictionary."""
+     global _FAKE_BACKENDS_CACHE
+     if _FAKE_BACKENDS_CACHE is None:
+         # Import only when actually needed
+         import qiskit_ibm_runtime.fake_provider as fk_prov
+
+         _FAKE_BACKENDS_CACHE = {
+             5: [
+                 fk_prov.FakeManilaV2,
+                 fk_prov.FakeBelemV2,
+                 fk_prov.FakeLimaV2,
+                 fk_prov.FakeQuitoV2,
+             ],
+             7: [
+                 fk_prov.FakeOslo,
+                 fk_prov.FakePerth,
+                 fk_prov.FakeLagosV2,
+                 fk_prov.FakeNairobiV2,
+             ],
+             15: [fk_prov.FakeMelbourneV2],
+             16: [fk_prov.FakeGuadalupeV2],
+             20: [
+                 fk_prov.FakeAlmadenV2,
+                 fk_prov.FakeJohannesburgV2,
+                 fk_prov.FakeSingaporeV2,
+                 fk_prov.FakeBoeblingenV2,
+             ],
+             27: [
+                 fk_prov.FakeGeneva,
+                 fk_prov.FakePeekskill,
+                 fk_prov.FakeAuckland,
+                 fk_prov.FakeCairoV2,
+             ],
+         }
+     return _FAKE_BACKENDS_CACHE
+
+
+ def _find_best_fake_backend(circuit: QuantumCircuit) -> list[type] | None:
+     """Find the best fake backend for a given circuit based on qubit count.
+
+     Args:
+         circuit: QuantumCircuit to find a backend for.
+
+     Returns:
+         List of fake backend classes that support the circuit's qubit count, or None.
+     """
+     fake_backends = _load_fake_backends()
+     keys = sorted(fake_backends.keys())
+     pos = bisect.bisect_left(keys, circuit.num_qubits)
+     return fake_backends[keys[pos]] if pos < len(keys) else None
+
+
+ # Public API for backward compatibility with tests
+ def __getattr__(name: str):
+     """Lazy load FAKE_BACKENDS when accessed."""
+     if name == "FAKE_BACKENDS":
+         return _load_fake_backends()
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
+ def _default_n_processes() -> int:
+     """Get a reasonable default number of processes based on CPU count.
+
+     Uses most available CPU cores (all minus 1, or 3/4 if many cores), with a
+     minimum of 2 and maximum of 16. This provides good parallelism while leaving
+     one core free for system processes.
+
+     If running in a different thread or process (not the main thread/process),
+     limits to 2 cores to avoid resource contention.
+
+     Returns:
+         int: Default number of processes to use.
+     """
+     # Check if we're running in a worker thread or subprocess
+     is_main_thread = threading.current_thread() is threading.main_thread()
+     is_main_process = current_process().name == "MainProcess"
+
+     if not (is_main_thread and is_main_process):
+         # Running in a different thread/process - limit to 2 cores
+         return 2
+
+     cpu_count = os.cpu_count() or 4
+     if cpu_count <= 4:
+         # For small systems, use all but 1 core
+         return max(2, cpu_count - 1)
+     elif cpu_count <= 16:
+         # For medium systems, use all but 1 core
+         return cpu_count - 1
+     else:
+         # For large systems, use 3/4 of cores, capped at 16
+         return min(16, int(cpu_count * 0.75))
+
+
+ class ParallelSimulator(CircuitRunner):
+     def __init__(
+         self,
+         n_processes: int | None = None,
+         shots: int = 5000,
+         simulation_seed: int | None = None,
+         qiskit_backend: Backend | Literal["auto"] | None = None,
+         noise_model: NoiseModel | None = None,
+         _deterministic_execution: bool = False,
+     ):
+         """
+         A parallel wrapper around Qiskit's AerSimulator using Qiskit's built-in parallelism.
+
+         Args:
+             n_processes (int | None, optional): Number of parallel processes to use for transpilation and
+                 simulation. If None, a default is derived from the available CPU cores (min 2, max 16; see `_default_n_processes`).
+                 Controls both transpilation parallelism and execution parallelism. The execution
+                 parallelism mode (circuit or shot) is automatically selected based on workload
+                 characteristics.
+             shots (int, optional): Number of shots to perform. Defaults to 5000.
+             simulation_seed (int, optional): Seed for the random number generator to ensure reproducibility. Defaults to None.
+             qiskit_backend (Backend | Literal["auto"] | None, optional): A Qiskit backend to initiate the simulator from.
+                 If "auto" is passed, the best-fit most recent fake backend will be chosen for the given circuit.
+                 Defaults to None, resulting in noiseless simulation.
+             noise_model (NoiseModel, optional): Qiskit noise model to use in simulation. Defaults to None.
+         """
+         super().__init__(shots=shots)
+
+         if qiskit_backend and noise_model:
+             warn(
+                 "Both `qiskit_backend` and `noise_model` have been provided."
+                 " `noise_model` will be ignored and the model from the backend will be used instead."
+             )
+
+         if n_processes is None:
+             n_processes = _default_n_processes()
+         elif n_processes < 1:
+             raise ValueError(f"n_processes must be >= 1, got {n_processes}")
+         self._n_processes = n_processes
+         self.simulation_seed = simulation_seed
+         self.qiskit_backend = qiskit_backend
+         self.noise_model = noise_model
+         self._deterministic_execution = _deterministic_execution
+
+     def set_seed(self, seed: int):
+         """
+         Set the random seed for circuit simulation.
+
+         Args:
+             seed (int): Seed value for the random number generator used in simulation.
+         """
+         self.simulation_seed = seed
+
+     @property
+     def n_processes(self) -> int:
+         """
+         Get the current number of parallel processes.
+
+         Returns:
+             int: Number of parallel processes configured.
+         """
+         return self._n_processes
+
+     @n_processes.setter
+     def n_processes(self, value: int):
+         """
+         Set the number of parallel processes (>= 1).
+
+         Controls:
+         - Transpilation parallelism
+         - OpenMP thread limit
+         - Circuit/Shot parallelism (auto-selected based on workload)
+         """
+         if value < 1:
+             raise ValueError(f"n_processes must be >= 1, got {value}")
+         self._n_processes = value
+
+     @property
+     def supports_expval(self) -> bool:
+         """
+         Whether the backend supports expectation value measurements.
+         """
+         return False
+
+     @property
+     def is_async(self) -> bool:
+         """
+         Whether the backend executes circuits asynchronously.
+         """
+         return False
+
+     def _resolve_backend(self, circuit: QuantumCircuit | None = None) -> Backend | None:
+         """Resolve the backend from qiskit_backend setting."""
+         if self.qiskit_backend == "auto":
+             if circuit is None:
+                 raise ValueError(
+                     "Circuit must be provided when qiskit_backend is 'auto'"
+                 )
+             backend_list = _find_best_fake_backend(circuit)
+             if backend_list is None:
+                 raise ValueError(
+                     f"No fake backend available for circuit with {circuit.num_qubits} qubits. "
+                     "Please provide an explicit backend or use a smaller circuit."
+                 )
+             return backend_list[-1]()
+         return self.qiskit_backend
+
+     def _create_simulator(self, resolved_backend: Backend | None) -> AerSimulator:
+         """Create an AerSimulator instance from a resolved backend or noise model."""
+         return (
+             AerSimulator.from_backend(resolved_backend)
+             if resolved_backend is not None
+             else AerSimulator(noise_model=self.noise_model)
+         )
+
+     def _execute_circuits_deterministically(
+         self,
+         circuit_labels: list[str],
+         transpiled_circuits: list[QuantumCircuit],
+         resolved_backend: Backend | None,
+     ) -> list[dict[str, Any]]:
+         """
+         Execute circuits individually for debugging purposes.
+
+         This method ensures deterministic results by running each circuit with its own
+         simulator instance and the same seed. Used internally for debugging non-deterministic
+         behavior in batch execution.
+
+         Args:
+             circuit_labels: List of circuit labels
+             transpiled_circuits: List of transpiled QuantumCircuit objects
+             resolved_backend: Resolved backend for simulator creation
+
+         Returns:
+             List of result dictionaries
+         """
+         results = []
+         for i, (label, transpiled_circuit) in enumerate(
+             zip(circuit_labels, transpiled_circuits)
+         ):
+             # Create a new simulator instance for each circuit with the same seed
+             circuit_simulator = self._create_simulator(resolved_backend)
+
+             if self.simulation_seed is not None:
+                 circuit_simulator.set_option("seed_simulator", self.simulation_seed)
+
+             # Run the single circuit
+             job = circuit_simulator.run(transpiled_circuit, shots=self.shots)
+             circuit_result = job.result()
+             counts = circuit_result.get_counts(0)
+             results.append({"label": label, "results": dict(counts)})
+
+         return results
+
+     def _configure_simulator_parallelism(
+         self, aer_simulator: AerSimulator, num_circuits: int
+     ):
+         """Configure AerSimulator parallelism options based on workload."""
+         if self.simulation_seed is not None:
+             aer_simulator.set_options(seed_simulator=self.simulation_seed)
+
+         # Default to utilizing all allocated processes for threads
+         options = {"max_parallel_threads": self.n_processes}
+
+         if num_circuits > 1:
+             # Batch mode: parallelize experiments
+             options.update(
+                 {
+                     "max_parallel_experiments": min(num_circuits, self.n_processes),
+                     "max_parallel_shots": 1,
+                 }
+             )
+         elif self.shots >= self.n_processes:
+             # Single circuit, high shots: parallelize shots
+             options.update(
+                 {
+                     "max_parallel_experiments": 1,
+                     "max_parallel_shots": self.n_processes,
+                 }
+             )
+         else:
+             # Single circuit, low shots: default behavior (usually serial shots)
+             options.update(
+                 {
+                     "max_parallel_experiments": 1,
+                     "max_parallel_shots": 1,
+                 }
+             )
+
+         aer_simulator.set_options(**options)
+
+     def submit_circuits(self, circuits: dict[str, str]) -> ExecutionResult:
+         """
+         Submit multiple circuits for parallel simulation using Qiskit's built-in parallelism.
+
+         Uses Qiskit's native batch transpilation and execution, which handles parallelism
+         internally.
+
+         Args:
+             circuits (dict[str, str]): Dictionary mapping circuit labels to OpenQASM
+                 string representations.
+
+         Returns:
+             ExecutionResult: Contains results directly (synchronous execution).
+                 Results are in the format: [{"label": str, "results": dict}, ...]
+         """
+         logger.debug(
+             f"Simulating {len(circuits)} circuits with {self.n_processes} processes"
+         )
+
+         # 1. Parse Circuits
+         circuit_labels = list(circuits.keys())
+         qiskit_circuits = [
+             QuantumCircuit.from_qasm_str(qasm) for qasm in circuits.values()
+         ]
+
+         # 2. Resolve Backend
+         if self.qiskit_backend == "auto":
+             max_qubits_circ = max(qiskit_circuits, key=lambda x: x.num_qubits)
+             resolved_backend = self._resolve_backend(max_qubits_circ)
+         else:
+             resolved_backend = self._resolve_backend()
+
+         # 3. Configure Simulator
+         aer_simulator = self._create_simulator(resolved_backend)
+         self._configure_simulator_parallelism(aer_simulator, len(qiskit_circuits))
+
+         # 4. Transpile
+         transpiled_circuits = transpile(
+             qiskit_circuits, aer_simulator, num_processes=self.n_processes
+         )
+
+         # 5. Execute
+         if self._deterministic_execution:
+             results = self._execute_circuits_deterministically(
+                 circuit_labels, transpiled_circuits, resolved_backend
+             )
+             return ExecutionResult(results=results)
+
+         job = aer_simulator.run(transpiled_circuits, shots=self.shots)
+         batch_result = job.result()
+
+         # Check for non-determinism warnings
+         metadata = batch_result.metadata
+         if (
+             parallel_experiments := metadata.get("parallel_experiments", 1)
+         ) > 1 and self.simulation_seed is not None:
+             omp_nested = metadata.get("omp_nested", False)
+             logger.warning(
+                 f"Parallel execution detected (parallel_experiments={parallel_experiments}, "
+                 f"omp_nested={omp_nested}). Results may not be deterministic across different "
+                 "grouping strategies. Consider enabling deterministic mode for "
+                 "deterministic results."
+             )
+
+         # 6. Format Results
+         results = [
+             {"label": label, "results": dict(batch_result.get_counts(i))}
+             for i, label in enumerate(circuit_labels)
+         ]
+         return ExecutionResult(results=results)
+
+     @staticmethod
+     def estimate_run_time_single_circuit(
+         circuit: str,
+         qiskit_backend: Backend | Literal["auto"],
+         **transpilation_kwargs,
+     ) -> float:
+         """
+         Estimate the execution time of a quantum circuit on a given backend, accounting for parallel gate execution.
+
+         Parameters:
+             circuit: The quantum circuit to estimate execution time for as a QASM string.
+             qiskit_backend: A Qiskit backend to use for gate time estimation.
+
+         Returns:
+             float: Estimated execution time in seconds.
+         """
+         qiskit_circuit = QuantumCircuit.from_qasm_str(circuit)
+
+         if qiskit_backend == "auto":
+             if not (backend_list := _find_best_fake_backend(qiskit_circuit)):
+                 raise ValueError(
+                     f"No fake backend available for circuit with {qiskit_circuit.num_qubits} qubits. "
+                     "Please provide an explicit backend or use a smaller circuit."
+                 )
+             resolved_backend = backend_list[-1]()
+         else:
+             resolved_backend = qiskit_backend
+
+         transpiled_circuit = transpile(
+             qiskit_circuit, resolved_backend, **transpilation_kwargs
+         )
+
+         total_run_time_s = 0.0
+         durations = resolved_backend.instruction_durations
+
+         for node in circuit_to_dag(transpiled_circuit).longest_path():
+             if not isinstance(node, DAGOpNode) or not node.num_qubits:
+                 continue
+
+             try:
+                 idx = tuple(q._index for q in node.qargs)
+                 total_run_time_s += durations.duration_by_name_qubits[(node.name, idx)][
+                     0
+                 ]
+             except KeyError:
+                 if node.name != "barrier":
+                     warn(f"Instruction duration not found: {node.name}")
+
+         return total_run_time_s
+
+     @staticmethod
+     def estimate_run_time_batch(
+         circuits: list[str] | None = None,
+         precomputed_durations: list[float] | None = None,
+         n_qpus: int = 5,
+         **transpilation_kwargs,
+     ) -> float:
+         """
+         Estimate the total execution time of a batch of quantum circuits distributed across `n_qpus` QPUs, scheduling the per-circuit estimates with the LPT heuristic.
+
+         Parameters:
+             circuits (list[str]): The quantum circuits to estimate execution time for, as QASM strings.
+             precomputed_durations (list[float]): A list of precomputed durations to use.
+             n_qpus (int): Number of QPU nodes in the pre-supposed cluster we are estimating runtime against.
+
+         Returns:
+             float: Estimated execution time in seconds.
+         """
+
+         # Compute the run time estimates for each given circuit, in descending order
+         if precomputed_durations is None:
+             with Pool() as p:
+                 estimated_run_times = p.map(
+                     partial(
+                         ParallelSimulator.estimate_run_time_single_circuit,
+                         qiskit_backend="auto",
+                         **transpilation_kwargs,
+                     ),
+                     circuits,
+                 )
+             estimated_run_times_sorted = sorted(estimated_run_times, reverse=True)
+         else:
+             estimated_run_times_sorted = sorted(precomputed_durations, reverse=True)
+
+         # Optimization for trivial case
+         if n_qpus >= len(estimated_run_times_sorted):
+             return estimated_run_times_sorted[0] if estimated_run_times_sorted else 0.0
+
+         # LPT (Longest Processing Time) scheduling using a min-heap of processor finish times
+         processor_finish_times = [0.0] * n_qpus
+         for run_time in estimated_run_times_sorted:
+             heapq.heappush(
+                 processor_finish_times, heapq.heappop(processor_finish_times) + run_time
+             )
+
+         return max(processor_finish_times)
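A brief usage sketch of the new backend, based only on the signatures above. The Bell-state QASM string and the seed value are placeholders; the expected output comment is an assumption about the counts format documented in submit_circuits.

# Usage sketch (placeholder circuit and values).
from divi.backends import ParallelSimulator

bell = """OPENQASM 2.0;
include "qelib1.inc";
qreg q[2];
creg c[2];
h q[0];
cx q[0], q[1];
measure q -> c;
"""

sim = ParallelSimulator(shots=1000, simulation_seed=42)
result = sim.submit_circuits({"bell": bell})
print(result.results)  # e.g. [{"label": "bell", "results": {"00": ..., "11": ...}}]

# estimate_run_time_batch distributes per-circuit estimates over n_qpus with the
# LPT heuristic shown above; with precomputed durations no backend is needed.
print(ParallelSimulator.estimate_run_time_batch(
    precomputed_durations=[3.0, 2.0, 2.0, 1.0], n_qpus=2
))  # 4.0: {3.0, 1.0} on one QPU, {2.0, 2.0} on the other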