qoro-divi 0.2.0b1__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- divi/__init__.py +1 -2
- divi/backends/__init__.py +10 -0
- divi/backends/_backend_properties_conversion.py +227 -0
- divi/backends/_circuit_runner.py +70 -0
- divi/backends/_execution_result.py +70 -0
- divi/backends/_parallel_simulator.py +486 -0
- divi/backends/_qoro_service.py +663 -0
- divi/backends/_qpu_system.py +101 -0
- divi/backends/_results_processing.py +133 -0
- divi/circuits/__init__.py +13 -0
- divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
- divi/circuits/_cirq/_parser.py +110 -0
- divi/circuits/_cirq/_qasm_export.py +78 -0
- divi/circuits/_core.py +391 -0
- divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
- divi/circuits/_qasm_validation.py +694 -0
- divi/qprog/__init__.py +27 -8
- divi/qprog/_expectation.py +181 -0
- divi/qprog/_hamiltonians.py +281 -0
- divi/qprog/algorithms/__init__.py +16 -0
- divi/qprog/algorithms/_ansatze.py +368 -0
- divi/qprog/algorithms/_custom_vqa.py +263 -0
- divi/qprog/algorithms/_pce.py +262 -0
- divi/qprog/algorithms/_qaoa.py +579 -0
- divi/qprog/algorithms/_vqe.py +262 -0
- divi/qprog/batch.py +387 -74
- divi/qprog/checkpointing.py +556 -0
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +1014 -43
- divi/qprog/quantum_program.py +243 -412
- divi/qprog/typing.py +62 -0
- divi/qprog/variational_quantum_algorithm.py +1208 -0
- divi/qprog/workflows/__init__.py +10 -0
- divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
- divi/qprog/workflows/_qubo_partitioning.py +221 -0
- divi/qprog/workflows/_vqe_sweep.py +560 -0
- divi/reporting/__init__.py +7 -0
- divi/reporting/_pbar.py +127 -0
- divi/reporting/_qlogger.py +68 -0
- divi/reporting/_reporter.py +155 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/METADATA +43 -15
- qoro_divi-0.6.0.dist-info/RECORD +47 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/WHEEL +1 -1
- qoro_divi-0.6.0.dist-info/licenses/LICENSES/.license-header +3 -0
- divi/_pbar.py +0 -73
- divi/circuits.py +0 -139
- divi/exp/cirq/_lexer.py +0 -126
- divi/exp/cirq/_parser.py +0 -889
- divi/exp/cirq/_qasm_export.py +0 -37
- divi/exp/cirq/_qasm_import.py +0 -35
- divi/exp/cirq/exception.py +0 -21
- divi/exp/scipy/_cobyla.py +0 -342
- divi/exp/scipy/pyprima/LICENCE.txt +0 -28
- divi/exp/scipy/pyprima/__init__.py +0 -263
- divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
- divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
- divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
- divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
- divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
- divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
- divi/exp/scipy/pyprima/cobyla/update.py +0 -331
- divi/exp/scipy/pyprima/common/__init__.py +0 -0
- divi/exp/scipy/pyprima/common/_bounds.py +0 -41
- divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
- divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
- divi/exp/scipy/pyprima/common/_project.py +0 -224
- divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
- divi/exp/scipy/pyprima/common/consts.py +0 -48
- divi/exp/scipy/pyprima/common/evaluate.py +0 -101
- divi/exp/scipy/pyprima/common/history.py +0 -39
- divi/exp/scipy/pyprima/common/infos.py +0 -30
- divi/exp/scipy/pyprima/common/linalg.py +0 -452
- divi/exp/scipy/pyprima/common/message.py +0 -336
- divi/exp/scipy/pyprima/common/powalg.py +0 -131
- divi/exp/scipy/pyprima/common/preproc.py +0 -393
- divi/exp/scipy/pyprima/common/present.py +0 -5
- divi/exp/scipy/pyprima/common/ratio.py +0 -56
- divi/exp/scipy/pyprima/common/redrho.py +0 -49
- divi/exp/scipy/pyprima/common/selectx.py +0 -346
- divi/interfaces.py +0 -25
- divi/parallel_simulator.py +0 -258
- divi/qlogger.py +0 -119
- divi/qoro_service.py +0 -343
- divi/qprog/_mlae.py +0 -182
- divi/qprog/_qaoa.py +0 -440
- divi/qprog/_vqe.py +0 -275
- divi/qprog/_vqe_sweep.py +0 -144
- divi/utils.py +0 -116
- qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
- /divi/{qem.py → circuits/qem.py} +0 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSE +0 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
|
@@ -0,0 +1,663 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import base64
|
|
6
|
+
import gzip
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import time
|
|
10
|
+
from collections.abc import Callable
|
|
11
|
+
from dataclasses import dataclass, fields, replace
|
|
12
|
+
from enum import Enum
|
|
13
|
+
from http import HTTPStatus
|
|
14
|
+
|
|
15
|
+
import requests
|
|
16
|
+
from dotenv import dotenv_values
|
|
17
|
+
from requests.adapters import HTTPAdapter, Retry
|
|
18
|
+
from rich.console import Console
|
|
19
|
+
|
|
20
|
+
from divi.backends import CircuitRunner
|
|
21
|
+
from divi.backends._execution_result import ExecutionResult
|
|
22
|
+
from divi.backends._qpu_system import (
|
|
23
|
+
QPUSystem,
|
|
24
|
+
get_qpu_system,
|
|
25
|
+
parse_qpu_systems,
|
|
26
|
+
update_qpu_systems_cache,
|
|
27
|
+
)
|
|
28
|
+
from divi.backends._results_processing import _decode_qh1_b64
|
|
29
|
+
from divi.circuits import is_valid_qasm, validate_qasm
|
|
30
|
+
|
|
31
|
+
# Base URL for all Qoro REST API calls.
API_URL = "https://app.qoroquantum.net/api"
# Per-request payload budget (in MiB) used when chunking circuit uploads;
# kept just under 1 MiB to leave headroom for envelope overhead.
_MAX_PAYLOAD_SIZE_MB = 0.95

# Module-level HTTP session shared by all QoroService instances.
session = requests.Session()
# Retry transient 502 gateway errors (with backoff) on the verbs the client uses.
retry_configuration = Retry(
    total=5,
    backoff_factor=0.1,
    status_forcelist=[502],
    allowed_methods=["GET", "POST", "DELETE"],
)

session.mount("http://", HTTPAdapter(max_retries=retry_configuration))
session.mount("https://", HTTPAdapter(max_retries=retry_configuration))

logger = logging.getLogger(__name__)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _raise_with_details(resp: requests.Response):
    """Raise an HTTPError whose message embeds the response body.

    Prefers the JSON-decoded body (re-serialized without ASCII escaping);
    falls back to the raw text when the body is not valid JSON.
    """
    try:
        detail = json.dumps(resp.json(), ensure_ascii=False)
    except ValueError:
        detail = resp.text
    raise requests.HTTPError(
        f"{resp.status_code} {resp.reason}: {detail}", response=resp
    )
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class JobStatus(Enum):
    """Lifecycle states a job can be in on the Qoro Service."""

    PENDING = "PENDING"  # queued, waiting to be processed
    RUNNING = "RUNNING"  # currently executing
    COMPLETED = "COMPLETED"  # finished successfully
    FAILED = "FAILED"  # execution hit an error
    CANCELLED = "CANCELLED"  # stopped before completion
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class JobType(Enum):
    """Kinds of work the Qoro Service can perform on submitted circuits."""

    EXECUTE = "EXECUTE"  # run on real quantum hardware (sampling mode only)
    SIMULATE = "SIMULATE"  # run on cloud-based simulators (sampling mode)
    EXPECTATION = "EXPECTATION"  # compute Hamiltonian expectation values (simulation only)
    CIRCUIT_CUT = "CIRCUIT_CUT"  # auto-decompose circuits too large for a single QPU
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@dataclass(frozen=True)
class JobConfig:
    """Immutable settings for a Qoro Service job."""

    shots: int | None = None  # number of shots for the job
    qpu_system: QPUSystem | str | None = None  # target system (object or name)
    use_circuit_packing: bool | None = None  # enable circuit-packing optimization
    tag: str = "default"  # free-form identifier attached to the job
    force_sampling: bool = False  # force sampling instead of expval measurements

    def override(self, other: "JobConfig") -> "JobConfig":
        """Return a fresh JobConfig with *other*'s non-None fields layered on top.

        Neither instance is mutated; the merge always produces a new object.

        Args:
            other: Config whose non-None attributes win over this one's.

        Returns:
            A new JobConfig holding the merged settings.
        """
        merged = {spec.name: getattr(self, spec.name) for spec in fields(self)}
        merged.update(
            {
                spec.name: val
                for spec in fields(other)
                if (val := getattr(other, spec.name)) is not None
            }
        )
        return JobConfig(**merged)

    def __post_init__(self):
        """Validate field values right after construction."""
        if self.shots is not None and self.shots <= 0:
            raise ValueError(f"Shots must be a positive integer. Got {self.shots}.")

        # A string qpu_system is deliberately left unresolved here: resolution
        # happens in QoroService.__init__() after the systems are fetched, so a
        # JobConfig can exist before any service does.
        if self.qpu_system is not None and not isinstance(
            self.qpu_system, (str, QPUSystem)
        ):
            raise TypeError(
                f"Expected a QPUSystem instance or str, got {type(self.qpu_system)}"
            )

        if self.use_circuit_packing is not None and not isinstance(
            self.use_circuit_packing, bool
        ):
            raise TypeError(f"Expected a bool, got {type(self.use_circuit_packing)}")
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
class MaxRetriesReachedError(Exception):
    """Raised when polling a job exhausts the allowed number of retries."""

    def __init__(self, job_id, retries):
        message = (
            f"Maximum retries reached: {retries} retries attempted for job {job_id}"
        )
        super().__init__(message)
        # Keep the raw values around so callers can inspect them.
        self.job_id = job_id
        self.retries = retries
        self.message = message
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
# Fallback target system; flagged as supporting expectation-value jobs.
_DEFAULT_QPU_SYSTEM = QPUSystem(name="qoro_maestro", supports_expval=True)

# Service-wide defaults applied when the caller supplies no JobConfig.
_DEFAULT_JOB_CONFIG = JobConfig(
    shots=1000, qpu_system=_DEFAULT_QPU_SYSTEM, use_circuit_packing=False
)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
class QoroService(CircuitRunner):
|
|
174
|
+
"""A client for interacting with the Qoro Quantum Service API.
|
|
175
|
+
|
|
176
|
+
This class provides methods to submit circuits, check job status,
|
|
177
|
+
and retrieve results from the Qoro platform.
|
|
178
|
+
"""
|
|
179
|
+
|
|
180
|
+
def __init__(
    self,
    auth_token: str | None = None,
    config: JobConfig | None = None,
    polling_interval: float = 3.0,
    max_retries: int = 5000,
):
    """Initializes the QoroService client.

    Args:
        auth_token (str | None, optional):
            The authentication token for the Qoro API. If not provided,
            it will be read from the QORO_API_KEY in a .env file.
        config (JobConfig | None, optional):
            A JobConfig object containing default job settings. If not
            provided, a default configuration will be created.
        polling_interval (float, optional):
            The interval in seconds for polling job status. Defaults to 3.0.
        max_retries (int, optional):
            The maximum number of retries for polling. Defaults to 5000.

    Raises:
        ValueError: If no auth token is supplied and none can be read from
            a .env file, or if the resulting config has no qpu_system.
    """

    # Set up auth_token first (needed for API calls like fetch_qpu_systems).
    if auth_token is None:
        # .get() instead of [] so a key that is present but empty/valueless
        # (dotenv yields None) is treated the same as a missing key, rather
        # than crashing later on "Bearer " + None.
        auth_token = dotenv_values().get("QORO_API_KEY")
        if auth_token is None:
            raise ValueError("Qoro API key not provided nor found in a .env file.")

    self.auth_token = "Bearer " + auth_token
    self.polling_interval = polling_interval
    self.max_retries = max_retries

    # Fetch QPU systems (needs auth_token to be set); also primes the
    # module-level cache used for resolving string system names.
    self.fetch_qpu_systems()

    # Set up config, falling back to the module defaults.
    if config is None:
        config = _DEFAULT_JOB_CONFIG

    # Resolve string qpu_system names and validate that one is present.
    self.config = self._resolve_and_validate_qpu_system(config)

    super().__init__(shots=self.config.shots)
|
|
224
|
+
|
|
225
|
+
@property
def supports_expval(self) -> bool:
    """Whether the backend supports expectation value measurements.

    True only when the configured QPU system supports them and sampling
    has not been forced via the job config.
    """
    cfg = self.config
    return cfg.qpu_system.supports_expval and not cfg.force_sampling
|
|
231
|
+
|
|
232
|
+
@property
def is_async(self) -> bool:
    """Whether the backend executes circuits asynchronously.

    Always True for the remote Qoro service: submission returns a job_id
    and results are fetched later.
    """
    return True
|
|
238
|
+
|
|
239
|
+
def _resolve_and_validate_qpu_system(self, config: JobConfig) -> JobConfig:
    """Ensures the config has a valid QPUSystem object, resolving from string if needed."""
    qpu = config.qpu_system
    if qpu is None:
        raise ValueError(
            "JobConfig must have a qpu_system. It cannot be None. "
            "Please provide a QPUSystem object or a valid system name string."
        )

    # Non-string values are already QPUSystem instances (enforced by
    # JobConfig.__post_init__), so the config can be returned untouched.
    if not isinstance(qpu, str):
        return config

    return replace(config, qpu_system=get_qpu_system(qpu))
|
|
252
|
+
|
|
253
|
+
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
    """Send an authenticated HTTP request to the Qoro API.

    Centralizes all API communication: builds the Authorization header,
    sets a JSON Content-Type for mutating verbs, and converts error
    responses into detailed exceptions.

    Args:
        method (str): HTTP method to use (e.g., 'get', 'post', 'delete').
        endpoint (str): API endpoint path (without base URL).
        **kwargs: Extra arguments forwarded to requests.request(), such as
            'json', 'timeout', 'params', etc.

    Returns:
        requests.Response: The HTTP response object from the API.

    Raises:
        requests.exceptions.HTTPError: If the response status code is 400 or above.
    """
    request_headers = {"Authorization": self.auth_token}

    # Mutating verbs carry a JSON body by default.
    if method.upper() in ("POST", "PUT", "PATCH"):
        request_headers["Content-Type"] = "application/json"

    # Caller-supplied headers win over the defaults above.
    if "headers" in kwargs:
        request_headers.update(kwargs.pop("headers"))

    resp = session.request(
        method, f"{API_URL}/{endpoint}", headers=request_headers, **kwargs
    )

    # Surface rich error details instead of a bare raise_for_status().
    if resp.status_code >= 400:
        _raise_with_details(resp)

    return resp
|
|
290
|
+
|
|
291
|
+
def _extract_job_id(self, execution_result: ExecutionResult) -> str:
    """Return the job_id carried by *execution_result*, raising if it is absent."""
    if execution_result.job_id is None:
        raise ValueError(
            "ExecutionResult must have a job_id. "
            "This ExecutionResult appears to be from a synchronous backend."
        )
    return execution_result.job_id
|
|
299
|
+
|
|
300
|
+
def test_connection(self):
|
|
301
|
+
"""
|
|
302
|
+
Test the connection to the Qoro API.
|
|
303
|
+
|
|
304
|
+
Sends a simple GET request to verify that the API is reachable and
|
|
305
|
+
the authentication token is valid.
|
|
306
|
+
|
|
307
|
+
Returns:
|
|
308
|
+
requests.Response: The response from the API ping endpoint.
|
|
309
|
+
|
|
310
|
+
Raises:
|
|
311
|
+
requests.exceptions.HTTPError: If the connection fails or authentication
|
|
312
|
+
is invalid.
|
|
313
|
+
"""
|
|
314
|
+
return self._make_request("get", "", timeout=10)
|
|
315
|
+
|
|
316
|
+
def fetch_qpu_systems(self) -> list[QPUSystem]:
    """
    Get the list of available QPU systems from the Qoro API.

    Also refreshes the module-level QPU-system cache as a side effect.

    Returns:
        List of QPUSystem objects.
    """
    payload = self._make_request("get", "qpusystem/", timeout=10).json()
    systems = parse_qpu_systems(payload)
    update_qpu_systems_cache(systems)
    return systems
|
|
327
|
+
|
|
328
|
+
@staticmethod
|
|
329
|
+
def _compress_data(value) -> bytes:
|
|
330
|
+
return base64.b64encode(gzip.compress(value.encode("utf-8"))).decode("utf-8")
|
|
331
|
+
|
|
332
|
+
def _split_circuits(self, circuits: dict[str, str]) -> list[dict[str, str]]:
    """
    Split circuits into payload-sized chunks of compressed entries.

    Payload size is estimated with a simplified, consistent per-item
    overhead. Assumes base64 encoding produces ASCII characters, which
    are 1 byte each.
    """
    budget = _MAX_PAYLOAD_SIZE_MB * 1024 * 1024
    chunks: list[dict[str, str]] = []
    chunk: dict[str, str] = {}
    # 2 bytes account for the JSON object's enclosing braces '{}'.
    used = 2

    for name, qasm in circuits.items():
        blob = self._compress_data(qasm)
        # key + value + 6 bytes of quoting/colon/comma overhead per entry.
        cost = len(name) + len(blob) + 6

        # Close out the current (non-empty) chunk if this entry won't fit.
        if chunk and used + cost > budget:
            chunks.append(chunk)
            chunk = {}
            used = 2

        chunk[name] = blob
        used += cost

    # Flush whatever remains.
    if chunk:
        chunks.append(chunk)

    return chunks
|
|
370
|
+
|
|
371
|
+
def submit_circuits(
    self,
    circuits: dict[str, str],
    ham_ops: str | None = None,
    job_type: JobType | None = None,
    override_config: JobConfig | None = None,
) -> ExecutionResult:
    """
    Submit quantum circuits to the Qoro API for execution.

    This method first initializes a job and then sends the circuits in
    one or more chunks, associating them all with a single job ID.

    Args:
        circuits (dict[str, str]):
            Dictionary mapping unique circuit IDs to QASM circuit strings.
        ham_ops (str | None, optional):
            String representing the Hamiltonian operators to measure, semicolon-separated.
            Each term is a combination of Pauli operators, e.g. "XYZ;XXZ;ZIZ".
            If None, no Hamiltonian operators will be measured.
        job_type (JobType | None, optional):
            Type of job to execute (e.g., SIMULATE, EXECUTE, EXPECTATION, CIRCUIT_CUT).
            If not provided, the job type will be determined from the service configuration.
        override_config (JobConfig | None, optional):
            Configuration object to override the service's default settings.
            If not provided, default values are used.

    Raises:
        ValueError: If more than one circuit is submitted for a CIRCUIT_CUT job,
            if any circuit is not valid QASM, or if ham_ops is malformed.
        requests.exceptions.HTTPError: If any API request fails.

    Returns:
        ExecutionResult: Contains job_id for asynchronous execution. Use the job_id
            to poll for results using backend.poll_job_status() and get_job_results().
    """
    # Create final job configuration by layering configurations:
    # service defaults -> user overrides
    if override_config:
        config = self.config.override(override_config)
        job_config = self._resolve_and_validate_qpu_system(config)
    else:
        job_config = self.config

    # Handle Hamiltonian operators: validate compatibility and auto-infer job type
    if ham_ops is not None:
        # Validate that if job_type is explicitly set, it must be EXPECTATION
        if job_type is not None and job_type != JobType.EXPECTATION:
            raise ValueError(
                "Hamiltonian operators are only supported for EXPECTATION job type."
            )
        # Auto-infer job type if not explicitly set
        if job_type is None:
            job_type = JobType.EXPECTATION

        # Validate observables format.
        terms = ham_ops.split(";")
        # BUG FIX: str.split(";") always returns at least one element, so the
        # previous `len(terms) == 0` check could never fire and an empty or
        # blank ham_ops string slipped through validation. Reject empty input
        # and empty terms (e.g. "XX;;YY") explicitly.
        if not ham_ops or any(not term for term in terms):
            raise ValueError(
                "Hamiltonian operators must be non-empty semicolon-separated strings."
            )
        ham_ops_length = len(terms[0])
        if not all(len(term) == ham_ops_length for term in terms):
            raise ValueError("All Hamiltonian operators must have the same length.")
        # Validate that each term only contains I, X, Y, Z
        valid_paulis = {"I", "X", "Y", "Z"}
        if not all(all(c in valid_paulis for c in term) for term in terms):
            raise ValueError(
                "Hamiltonian operators must contain only I, X, Y, Z characters."
            )

    if job_type is None:
        job_type = JobType.SIMULATE

    # Validate circuits
    if job_type == JobType.CIRCUIT_CUT and len(circuits) > 1:
        raise ValueError("Only one circuit allowed for circuit-cutting jobs.")

    for key, circuit in circuits.items():
        if not is_valid_qasm(circuit):
            # Get the actual error message for better error reporting
            try:
                validate_qasm(circuit)
            except SyntaxError as e:
                raise ValueError(f"Circuit '{key}' is not a valid QASM: {e}") from e
            # BUG FIX: previously, if validate_qasm did not raise even though
            # is_valid_qasm rejected the circuit, the invalid circuit was
            # silently accepted. Fail explicitly instead.
            raise ValueError(f"Circuit '{key}' is not a valid QASM.")

    # Initialize the job without circuits to get a job_id
    init_payload = {
        "tag": job_config.tag,
        "job_type": job_type.value,
        "qpu_system_name": (
            job_config.qpu_system.name if job_config.qpu_system else None
        ),
        "use_packing": job_config.use_circuit_packing or False,
    }

    init_response = self._make_request(
        "post", "job/init/", json=init_payload, timeout=100
    )
    if init_response.status_code not in [HTTPStatus.OK, HTTPStatus.CREATED]:
        _raise_with_details(init_response)
    job_id = init_response.json()["job_id"]

    # Split circuits and add them to the created job
    circuit_chunks = self._split_circuits(circuits)
    num_chunks = len(circuit_chunks)

    for i, chunk in enumerate(circuit_chunks):
        is_last_chunk = i == num_chunks - 1
        add_circuits_payload = {
            "circuits": chunk,
            "mode": "append",
            "finalized": "true" if is_last_chunk else "false",
        }

        # Include shots/ham_ops in add_circuits payload
        if ham_ops is not None:
            add_circuits_payload["observables"] = ham_ops
        else:
            add_circuits_payload["shots"] = job_config.shots

        add_circuits_response = self._make_request(
            "post",
            f"job/{job_id}/add_circuits/",
            json=add_circuits_payload,
            timeout=100,
        )
        if add_circuits_response.status_code != HTTPStatus.OK:
            _raise_with_details(add_circuits_response)

    return ExecutionResult(results=None, job_id=job_id)
|
|
503
|
+
|
|
504
|
+
def delete_job(self, execution_result: ExecutionResult) -> requests.Response:
    """
    Delete a job from the Qoro Database.

    Args:
        execution_result: An ExecutionResult instance with a job_id to delete.
    Returns:
        requests.Response: The response from the API.
    Raises:
        ValueError: If the ExecutionResult does not have a job_id.
    """
    job_id = self._extract_job_id(execution_result)
    return self._make_request("delete", f"job/{job_id}", timeout=50)
|
|
521
|
+
|
|
522
|
+
def cancel_job(self, execution_result: ExecutionResult) -> requests.Response:
    """
    Cancel a job on the Qoro Service.

    Args:
        execution_result: An ExecutionResult instance with a job_id to cancel.
    Returns:
        requests.Response: The response from the API. Use response.json() to get
            the cancellation details (status, job_id, circuits_cancelled).
    Raises:
        ValueError: If the ExecutionResult does not have a job_id.
        requests.exceptions.HTTPError: If the cancellation fails (e.g., 403 Forbidden,
            or 409 Conflict if job is not in a cancellable state).
    """
    job_id = self._extract_job_id(execution_result)
    return self._make_request("post", f"job/{job_id}/cancel/", timeout=50)
|
|
542
|
+
|
|
543
|
+
def get_job_results(self, execution_result: ExecutionResult) -> ExecutionResult:
    """
    Get the results of a job from the Qoro Database.

    Args:
        execution_result: An ExecutionResult instance with a job_id to fetch results for.

    Returns:
        ExecutionResult: A new ExecutionResult instance with results populated.

    Raises:
        ValueError: If the ExecutionResult does not have a job_id.
        requests.exceptions.HTTPError: If the job results are not available
            (e.g., job is still running) or if the request fails.
    """
    job_id = self._extract_job_id(execution_result)

    try:
        response = self._make_request(
            "get",
            f"job/{job_id}/resultsV2/?limit=100&offset=0",
            timeout=100,
        )
    except requests.exceptions.HTTPError as e:
        # A 400 here almost always means the job has not finished yet, so
        # translate it into a clearer message; anything else propagates.
        if e.response.status_code == HTTPStatus.BAD_REQUEST:
            raise requests.exceptions.HTTPError(
                "400 Bad Request: Job results not available, likely job is still running"
            ) from e
        raise

    data = response.json()

    # Results arrive base64/QH1-encoded; decode each entry in place.
    for entry in data["results"]:
        entry["results"] = _decode_qh1_b64(entry["results"])

    # Hand back a fresh ExecutionResult carrying the decoded payload.
    return execution_result.with_results(data["results"])
|
|
583
|
+
|
|
584
|
+
def poll_job_status(
    self,
    execution_result: ExecutionResult,
    loop_until_complete: bool = False,
    on_complete: Callable[[requests.Response], None] | None = None,
    verbose: bool = True,
    progress_callback: Callable[[int, str], None] | None = None,
) -> JobStatus:
    """
    Get the status of a job and optionally execute a function on completion.

    Args:
        execution_result: An ExecutionResult instance with a job_id to check.
        loop_until_complete (bool): If True, polls until the job is complete or failed.
        on_complete (Callable, optional): A function to call with the final response
            object when the job finishes.
        verbose (bool, optional): If True, prints polling status to the logger.
        progress_callback (Callable, optional): A function for updating progress bars.
            Takes `(retry_count, status)`.

    Returns:
        JobStatus: The current job status.

    Raises:
        ValueError: If the ExecutionResult does not have a job_id.
        MaxRetriesReachedError: If the job does not reach a terminal state
            within max_retries polling attempts.
    """
    job_id = self._extract_job_id(execution_result)

    polling_status = None

    def _fetch_status_response() -> requests.Response:
        # Shared by the single-shot and looping paths below.
        return self._make_request("get", f"job/{job_id}/status/", timeout=200)

    def _noop(_retry_count, _status):
        # Used when neither a callback nor verbose output is requested.
        # (A def instead of an assigned lambda, per PEP 8 E731.)
        pass

    # Decide once at the start which update function to use.
    if progress_callback:
        update_fn = progress_callback
    elif verbose:
        # Use Rich's status for overwriting polling messages.
        polling_status = Console(file=None).status("", spinner="aesthetic")
        polling_status.start()

        def update_polling_status(retry_count, job_status):
            status_msg = (
                f"Job [cyan]{job_id.split('-')[0]}[/cyan] is {job_status}. "
                f"Polling attempt {retry_count} / {self.max_retries}"
            )
            polling_status.update(status_msg)

        update_fn = update_polling_status
    else:
        update_fn = _noop

    try:
        if not loop_until_complete:
            return JobStatus(_fetch_status_response().json()["status"])

        terminal_statuses = {
            JobStatus.COMPLETED,
            JobStatus.FAILED,
            JobStatus.CANCELLED,
        }

        for retry_count in range(1, self.max_retries + 1):
            status = JobStatus((response := _fetch_status_response()).json()["status"])

            if status in terminal_statuses:
                if on_complete:
                    on_complete(response)
                return status

            update_fn(retry_count, status.value)
            time.sleep(self.polling_interval)

        raise MaxRetriesReachedError(job_id, self.max_retries)
    finally:
        # Always clean up the Rich spinner, even on exceptions.
        if polling_status:
            polling_status.stop()
|