qoro-divi 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of qoro-divi might be problematic. Click here for more details.
- divi/backends/__init__.py +1 -1
- divi/backends/_circuit_runner.py +42 -0
- divi/backends/_parallel_simulator.py +145 -49
- divi/backends/_qoro_service.py +451 -182
- divi/backends/_qpu_system.py +77 -3
- divi/circuits/_core.py +124 -4
- divi/circuits/qasm.py +20 -3
- divi/extern/cirq/_validator.py +12 -3
- divi/qprog/__init__.py +1 -0
- divi/qprog/algorithms/_ansatze.py +112 -12
- divi/qprog/algorithms/_qaoa.py +179 -110
- divi/qprog/algorithms/_vqe.py +192 -58
- divi/qprog/batch.py +270 -51
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +336 -51
- divi/qprog/quantum_program.py +162 -339
- divi/qprog/variational_quantum_algorithm.py +786 -0
- divi/qprog/workflows/_graph_partitioning.py +43 -38
- divi/qprog/workflows/_qubo_partitioning.py +41 -24
- divi/qprog/workflows/_vqe_sweep.py +67 -39
- divi/reporting/_pbar.py +51 -9
- divi/reporting/_qlogger.py +35 -1
- divi/reporting/_reporter.py +11 -20
- divi/utils.py +100 -4
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/METADATA +16 -1
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/RECORD +30 -28
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSE +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSES/.license-header +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/WHEEL +0 -0
divi/backends/_qoro_service.py
CHANGED
|
@@ -8,6 +8,7 @@ import json
|
|
|
8
8
|
import logging
|
|
9
9
|
import time
|
|
10
10
|
from collections.abc import Callable
|
|
11
|
+
from dataclasses import dataclass, fields, replace
|
|
11
12
|
from enum import Enum
|
|
12
13
|
from http import HTTPStatus
|
|
13
14
|
|
|
@@ -16,11 +17,16 @@ from dotenv import dotenv_values
|
|
|
16
17
|
from requests.adapters import HTTPAdapter, Retry
|
|
17
18
|
|
|
18
19
|
from divi.backends import CircuitRunner
|
|
19
|
-
from divi.backends._qpu_system import
|
|
20
|
+
from divi.backends._qpu_system import (
|
|
21
|
+
QPUSystem,
|
|
22
|
+
get_qpu_system,
|
|
23
|
+
parse_qpu_systems,
|
|
24
|
+
update_qpu_systems_cache,
|
|
25
|
+
)
|
|
20
26
|
from divi.extern.cirq import is_valid_qasm
|
|
21
27
|
|
|
22
28
|
API_URL = "https://app.qoroquantum.net/api"
|
|
23
|
-
|
|
29
|
+
_MAX_PAYLOAD_SIZE_MB = 0.95
|
|
24
30
|
|
|
25
31
|
session = requests.Session()
|
|
26
32
|
retry_configuration = Retry(
|
|
@@ -36,6 +42,106 @@ session.mount("https://", HTTPAdapter(max_retries=retry_configuration))
|
|
|
36
42
|
logger = logging.getLogger(__name__)
|
|
37
43
|
|
|
38
44
|
|
|
45
|
+
def _decode_qh1_b64(encoded: dict) -> dict[str, int]:
    """Decode a base64-wrapped 'qh1' histogram into bitstring counts.

    Expects a dict of the form
    ``{'encoding': 'qh1', 'n_bits': N, 'payload': <base64 str>}``.

    Pass-through behavior:
        * ``None`` input is returned as-is (i.e. ``None``).
        * An empty dict, or one with a missing/empty payload, is returned
          unchanged.

    Raises:
        ValueError: If a payload is present but the encoding is not 'qh1'.
    """
    payload = encoded.get("payload") if encoded else None
    if not payload:
        # Nothing to decode: hand the input back untouched.
        return encoded

    encoding = encoded.get("encoding")
    if encoding != "qh1":
        raise ValueError(f"Unsupported encoding: {encoding}")

    decoded = _decompress_histogram(base64.b64decode(payload))
    # Normalize keys to plain strings for a uniform return type.
    return {str(bitstring): count for bitstring, count in decoded.items()}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _uleb128_decode(data: bytes, pos: int = 0) -> tuple[int, int]:
|
|
66
|
+
x = 0
|
|
67
|
+
shift = 0
|
|
68
|
+
while True:
|
|
69
|
+
if pos >= len(data):
|
|
70
|
+
raise ValueError("truncated varint")
|
|
71
|
+
b = data[pos]
|
|
72
|
+
pos += 1
|
|
73
|
+
x |= (b & 0x7F) << shift
|
|
74
|
+
if (b & 0x80) == 0:
|
|
75
|
+
break
|
|
76
|
+
shift += 7
|
|
77
|
+
return x, pos
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _int_to_bitstr(x: int, n_bits: int) -> str:
|
|
81
|
+
return format(x, f"0{n_bits}b")
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _rle_bool_decode(data: bytes, pos=0) -> tuple[list[bool], int]:
    """Decode a run-length-encoded boolean sequence from *data*.

    Wire layout: a varint run count; if non-zero, one raw byte giving the
    first boolean value, then one varint length per run.  Runs alternate
    between the two boolean values.

    Returns:
        A ``(values, next_position)`` tuple.
    """
    run_count, pos = _uleb128_decode(data, pos)
    if not run_count:
        return [], pos

    # A single raw byte (not a varint) selects the value of the first run.
    current = bool(data[pos])
    pos += 1

    decoded: list[bool] = []
    for _ in range(run_count):
        run_len, pos = _uleb128_decode(data, pos)
        decoded += [current] * run_len
        current = not current
    return decoded, pos
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _decompress_histogram(buf: bytes) -> dict[str, int]:
    """Decompress a QH1-encoded histogram blob into ``{bitstring: count}``.

    Wire layout (integers are ULEB128 varints unless noted):
    ``b"QH1"`` magic, one raw byte for the bitstring width, the number of
    unique entries, the total shot count, a gap-count followed by
    delta-encoded state indices, a length-prefixed RLE block of
    "count == 1" flags, and finally the remaining counts stored as
    ``count - 2``.

    Raises:
        ValueError: On a bad magic prefix, a truncated varint, or when the
            decoded counts fail the embedded integrity totals.
    """
    if not buf:
        return {}
    pos = 0
    # The stream must begin with the QH1 magic prefix.
    if buf[pos : pos + 3] != b"QH1":
        raise ValueError("bad magic")
    pos += 3
    # Bitstring width is a single raw byte, not a varint.
    n_bits = buf[pos]
    pos += 1
    unique, pos = _uleb128_decode(buf, pos)
    total_shots, pos = _uleb128_decode(buf, pos)

    # State indices are stored as gaps (deltas between sorted values).
    num_gaps, pos = _uleb128_decode(buf, pos)
    gaps = []
    for _ in range(num_gaps):
        g, pos = _uleb128_decode(buf, pos)
        gaps.append(g)

    # Undo the delta encoding: a running sum of gaps yields the absolute
    # state indices.
    idxs, acc = [], 0
    for i, g in enumerate(gaps):
        acc = g if i == 0 else acc + g
        idxs.append(acc)

    # One boolean per entry marking counts that are exactly 1, stored as a
    # length-prefixed RLE sub-block.
    rb_len, pos = _uleb128_decode(buf, pos)
    is_one, _ = _rle_bool_decode(buf[pos : pos + rb_len], 0)
    pos += rb_len

    # Counts other than 1 are stored biased by -2; `+ 2` below restores them.
    extras_len, pos = _uleb128_decode(buf, pos)
    extras = []
    for _ in range(extras_len):
        e, pos = _uleb128_decode(buf, pos)
        extras.append(e)

    # Re-interleave: flagged entries have count 1, the rest consume the
    # next biased value from `extras` in order.
    counts, it = [], iter(extras)
    for flag in is_one:
        counts.append(1 if flag else next(it) + 2)

    hist = {_int_to_bitstr(i, n_bits): c for i, c in zip(idxs, counts)}

    # optional integrity check
    if sum(counts) != total_shots:
        raise ValueError("corrupt stream: shot sum mismatch")
    if len(counts) != unique:
        raise ValueError("corrupt stream: unique mismatch")
    return hist
|
|
143
|
+
|
|
144
|
+
|
|
39
145
|
def _raise_with_details(resp: requests.Response):
|
|
40
146
|
try:
|
|
41
147
|
data = resp.json()
|
|
@@ -47,53 +153,150 @@ def _raise_with_details(resp: requests.Response):
|
|
|
47
153
|
|
|
48
154
|
|
|
49
155
|
class JobStatus(Enum):
    """Status of a job on the Qoro Service.

    Member values are the literal status strings returned by the service's
    job-status endpoint (the polling code compares against ``.value``).
    """

    PENDING = "PENDING"
    """Job is queued and waiting to be processed."""

    RUNNING = "RUNNING"
    """Job is currently being executed."""

    COMPLETED = "COMPLETED"
    """Job has finished successfully."""

    FAILED = "FAILED"
    """Job execution encountered an error."""

    CANCELLED = "CANCELLED"
    """Job was cancelled before completion."""
|
|
55
172
|
|
|
56
173
|
|
|
57
174
|
class JobType(Enum):
    """Type of job to execute on the Qoro Service.

    Member values are sent verbatim as the ``job_type`` field of the
    job-initialization payload.
    """

    EXECUTE = "EXECUTE"
    """Execute circuits on real quantum hardware (sampling mode only)."""

    SIMULATE = "SIMULATE"
    """Simulate circuits using cloud-based simulation services (sampling mode)."""

    EXPECTATION = "EXPECTATION"
    """Compute expectation values for Hamiltonian operators (simulation only)."""

    CIRCUIT_CUT = "CIRCUIT_CUT"
    """Automatically decompose large circuits that wouldn't fit on a QPU."""
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
@dataclass(frozen=True)
class JobConfig:
    """Immutable configuration for a Qoro Service job."""

    # Number of shots for the job (None = unset, inherit a default).
    shots: int | None = None
    # Target QPU system: a QPUSystem object, a system name string to be
    # resolved later, or None.
    qpu_system: QPUSystem | str | None = None
    # Whether to use circuit packing optimization (None = unset).
    use_circuit_packing: bool | None = None
    # Label attached to the job for identification.
    tag: str = "default"
    # Force sampling instead of expectation-value measurements.
    force_sampling: bool = False

    def override(self, other: "JobConfig") -> "JobConfig":
        """Merge *other* on top of *self*, returning a new config.

        Every attribute of *other* that is not None replaces the matching
        attribute of *self*; both operands are left unmodified.

        Args:
            other: Config whose non-None attributes take precedence.

        Returns:
            A new JobConfig instance with the merged configurations.
        """
        merged = {f.name: getattr(self, f.name) for f in fields(self)}
        merged.update(
            (f.name, value)
            for f in fields(other)
            if (value := getattr(other, f.name)) is not None
        )
        return JobConfig(**merged)

    def __post_init__(self):
        """Sanitizes and validates the configuration."""
        if self.shots is not None and self.shots <= 0:
            raise ValueError(f"Shots must be a positive integer. Got {self.shots}.")

        # A string qpu_system is deliberately accepted unresolved here: it
        # is turned into a QPUSystem in QoroService.__init__() once the
        # available systems have been fetched.  This lets a JobConfig be
        # created before any QoroService exists.
        qpu = self.qpu_system
        if qpu is not None and not isinstance(qpu, (str, QPUSystem)):
            raise TypeError(f"Expected a QPUSystem instance or str, got {type(qpu)}")

        packing = self.use_circuit_packing
        if packing is not None and not isinstance(packing, bool):
            raise TypeError(f"Expected a bool, got {type(packing)}")
|
|
62
249
|
|
|
63
250
|
|
|
64
251
|
class MaxRetriesReachedError(Exception):
    """Raised when polling a job exhausts the allowed number of retries."""

    def __init__(self, job_id, retries):
        message = (
            f"Maximum retries reached: {retries} retries attempted for job {job_id}"
        )
        super().__init__(message)
        # Expose the details for programmatic inspection by callers.
        self.job_id = job_id
        self.retries = retries
        self.message = message
|
|
71
261
|
|
|
72
262
|
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
access_level=system_data["access_level"],
|
|
79
|
-
)
|
|
80
|
-
for system_data in json_data
|
|
81
|
-
]
|
|
263
|
+
# Fallback system used when the caller supplies no QPU system of their own.
_DEFAULT_QPU_SYSTEM = QPUSystem(name="qoro_maestro", supports_expval=True)

# Service-level defaults; user-provided JobConfigs are layered on top of
# these via JobConfig.override().
_DEFAULT_JOB_CONFIG = JobConfig(
    shots=1000, qpu_system=_DEFAULT_QPU_SYSTEM, use_circuit_packing=False
)
|
|
82
268
|
|
|
83
269
|
|
|
84
270
|
class QoroService(CircuitRunner):
|
|
271
|
+
"""A client for interacting with the Qoro Quantum Service API.
|
|
272
|
+
|
|
273
|
+
This class provides methods to submit circuits, check job status,
|
|
274
|
+
and retrieve results from the Qoro platform.
|
|
275
|
+
"""
|
|
85
276
|
|
|
86
277
|
def __init__(
|
|
87
278
|
self,
|
|
88
279
|
auth_token: str | None = None,
|
|
280
|
+
config: JobConfig | None = None,
|
|
89
281
|
polling_interval: float = 3.0,
|
|
90
282
|
max_retries: int = 5000,
|
|
91
|
-
shots: int = 1000,
|
|
92
|
-
qpu_system_name: str | QPUSystem | None = None,
|
|
93
|
-
use_circuit_packing: bool = False,
|
|
94
283
|
):
|
|
95
|
-
|
|
284
|
+
"""Initializes the QoroService client.
|
|
285
|
+
|
|
286
|
+
Args:
|
|
287
|
+
auth_token (str | None, optional):
|
|
288
|
+
The authentication token for the Qoro API. If not provided,
|
|
289
|
+
it will be read from the QORO_API_KEY in a .env file.
|
|
290
|
+
config (JobConfig | None, optional):
|
|
291
|
+
A JobConfig object containing default job settings. If not
|
|
292
|
+
provided, a default configuration will be created.
|
|
293
|
+
polling_interval (float, optional):
|
|
294
|
+
The interval in seconds for polling job status. Defaults to 3.0.
|
|
295
|
+
max_retries (int, optional):
|
|
296
|
+
The maximum number of retries for polling. Defaults to 5000.
|
|
297
|
+
"""
|
|
96
298
|
|
|
299
|
+
# Set up auth_token first (needed for API calls like fetch_qpu_systems)
|
|
97
300
|
if auth_token is None:
|
|
98
301
|
try:
|
|
99
302
|
auth_token = dotenv_values()["QORO_API_KEY"]
|
|
@@ -103,32 +306,66 @@ class QoroService(CircuitRunner):
|
|
|
103
306
|
self.auth_token = "Bearer " + auth_token
|
|
104
307
|
self.polling_interval = polling_interval
|
|
105
308
|
self.max_retries = max_retries
|
|
106
|
-
self._qpu_system_name = qpu_system_name
|
|
107
|
-
self.use_circuit_packing = use_circuit_packing
|
|
108
309
|
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
return self._qpu_system_name
|
|
310
|
+
# Fetch QPU systems (needs auth_token to be set)
|
|
311
|
+
self.fetch_qpu_systems()
|
|
112
312
|
|
|
113
|
-
|
|
114
|
-
|
|
313
|
+
# Set up config
|
|
314
|
+
if config is None:
|
|
315
|
+
config = _DEFAULT_JOB_CONFIG
|
|
316
|
+
|
|
317
|
+
# Resolve string qpu_system names and validate that one is present.
|
|
318
|
+
self.config = self._resolve_and_validate_qpu_system(config)
|
|
319
|
+
|
|
320
|
+
super().__init__(shots=self.config.shots)
|
|
321
|
+
|
|
322
|
+
@property
|
|
323
|
+
def supports_expval(self) -> bool:
|
|
324
|
+
"""
|
|
325
|
+
Whether the backend supports expectation value measurements.
|
|
115
326
|
"""
|
|
116
|
-
|
|
327
|
+
return self.config.qpu_system.supports_expval and not self.config.force_sampling
|
|
117
328
|
|
|
118
|
-
|
|
119
|
-
|
|
329
|
+
@property
|
|
330
|
+
def is_async(self) -> bool:
|
|
120
331
|
"""
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
raise
|
|
332
|
+
Whether the backend executes circuits asynchronously.
|
|
333
|
+
"""
|
|
334
|
+
return True
|
|
335
|
+
|
|
336
|
+
def _resolve_and_validate_qpu_system(self, config: JobConfig) -> JobConfig:
|
|
337
|
+
"""Ensures the config has a valid QPUSystem object, resolving from string if needed."""
|
|
338
|
+
if config.qpu_system is None:
|
|
339
|
+
raise ValueError(
|
|
340
|
+
"JobConfig must have a qpu_system. It cannot be None. "
|
|
341
|
+
"Please provide a QPUSystem object or a valid system name string."
|
|
342
|
+
)
|
|
343
|
+
|
|
344
|
+
if isinstance(config.qpu_system, str):
|
|
345
|
+
resolved_qpu = get_qpu_system(config.qpu_system)
|
|
346
|
+
return replace(config, qpu_system=resolved_qpu)
|
|
347
|
+
|
|
348
|
+
return config
|
|
129
349
|
|
|
130
350
|
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
|
|
131
|
-
"""
|
|
351
|
+
"""
|
|
352
|
+
Make an authenticated HTTP request to the Qoro API.
|
|
353
|
+
|
|
354
|
+
This internal method centralizes all API communication, handling authentication
|
|
355
|
+
headers and error responses consistently.
|
|
356
|
+
|
|
357
|
+
Args:
|
|
358
|
+
method (str): HTTP method to use (e.g., 'get', 'post', 'delete').
|
|
359
|
+
endpoint (str): API endpoint path (without base URL).
|
|
360
|
+
**kwargs: Additional arguments to pass to requests.request(), such as
|
|
361
|
+
'json', 'timeout', 'params', etc.
|
|
362
|
+
|
|
363
|
+
Returns:
|
|
364
|
+
requests.Response: The HTTP response object from the API.
|
|
365
|
+
|
|
366
|
+
Raises:
|
|
367
|
+
requests.exceptions.HTTPError: If the response status code is 400 or above.
|
|
368
|
+
"""
|
|
132
369
|
url = f"{API_URL}/{endpoint}"
|
|
133
370
|
|
|
134
371
|
headers = {"Authorization": self.auth_token}
|
|
@@ -142,16 +379,26 @@ class QoroService(CircuitRunner):
|
|
|
142
379
|
|
|
143
380
|
response = session.request(method, url, headers=headers, **kwargs)
|
|
144
381
|
|
|
145
|
-
#
|
|
382
|
+
# Raise with comprehensive error details if request failed
|
|
146
383
|
if response.status_code >= 400:
|
|
147
|
-
|
|
148
|
-
f"API Error: {response.status_code} {response.reason} for URL {response.url}"
|
|
149
|
-
)
|
|
384
|
+
_raise_with_details(response)
|
|
150
385
|
|
|
151
386
|
return response
|
|
152
387
|
|
|
153
388
|
def test_connection(self):
|
|
154
|
-
"""
|
|
389
|
+
"""
|
|
390
|
+
Test the connection to the Qoro API.
|
|
391
|
+
|
|
392
|
+
Sends a simple GET request to verify that the API is reachable and
|
|
393
|
+
the authentication token is valid.
|
|
394
|
+
|
|
395
|
+
Returns:
|
|
396
|
+
requests.Response: The response from the API ping endpoint.
|
|
397
|
+
|
|
398
|
+
Raises:
|
|
399
|
+
requests.exceptions.HTTPError: If the connection fails or authentication
|
|
400
|
+
is invalid.
|
|
401
|
+
"""
|
|
155
402
|
return self._make_request("get", "", timeout=10)
|
|
156
403
|
|
|
157
404
|
def fetch_qpu_systems(self) -> list[QPUSystem]:
|
|
@@ -162,7 +409,9 @@ class QoroService(CircuitRunner):
|
|
|
162
409
|
List of QPUSystem objects.
|
|
163
410
|
"""
|
|
164
411
|
response = self._make_request("get", "qpusystem/", timeout=10)
|
|
165
|
-
|
|
412
|
+
systems = parse_qpu_systems(response.json())
|
|
413
|
+
update_qpu_systems_cache(systems)
|
|
414
|
+
return systems
|
|
166
415
|
|
|
167
416
|
@staticmethod
|
|
168
417
|
def _compress_data(value) -> bytes:
|
|
@@ -174,7 +423,7 @@ class QoroService(CircuitRunner):
|
|
|
174
423
|
consistent overhead calculation.
|
|
175
424
|
Assumes that BASE64 encoding produces ASCI characters, which are 1 byte each.
|
|
176
425
|
"""
|
|
177
|
-
max_payload_bytes =
|
|
426
|
+
max_payload_bytes = _MAX_PAYLOAD_SIZE_MB * 1024 * 1024
|
|
178
427
|
circuit_chunks = []
|
|
179
428
|
current_chunk = {}
|
|
180
429
|
|
|
@@ -210,159 +459,203 @@ class QoroService(CircuitRunner):
|
|
|
210
459
|
def submit_circuits(
|
|
211
460
|
self,
|
|
212
461
|
circuits: dict[str, str],
|
|
213
|
-
|
|
214
|
-
job_type: JobType =
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
):
|
|
462
|
+
ham_ops: str | None = None,
|
|
463
|
+
job_type: JobType | None = None,
|
|
464
|
+
override_config: JobConfig | None = None,
|
|
465
|
+
) -> str:
|
|
218
466
|
"""
|
|
219
467
|
Submit quantum circuits to the Qoro API for execution.
|
|
220
468
|
|
|
469
|
+
This method first initializes a job and then sends the circuits in
|
|
470
|
+
one or more chunks, associating them all with a single job ID.
|
|
471
|
+
|
|
221
472
|
Args:
|
|
222
473
|
circuits (dict[str, str]):
|
|
223
474
|
Dictionary mapping unique circuit IDs to QASM circuit strings.
|
|
224
|
-
|
|
225
|
-
|
|
475
|
+
ham_ops (str | None, optional):
|
|
476
|
+
String representing the Hamiltonian operators to measure, semicolon-separated.
|
|
477
|
+
Each term is a combination of Pauli operators, e.g. "XYZ;XXZ;ZIZ".
|
|
478
|
+
If None, no Hamiltonian operators will be measured.
|
|
226
479
|
job_type (JobType, optional):
|
|
227
|
-
Type of job to execute (e.g., SIMULATE, EXECUTE,
|
|
228
|
-
|
|
229
|
-
|
|
480
|
+
Type of job to execute (e.g., SIMULATE, EXECUTE, EXPECTATION, CIRCUIT_CUT).
|
|
481
|
+
If not provided, the job type will be determined from the service configuration.
|
|
482
|
+
override_config (JobConfig | None, optional):
|
|
483
|
+
Configuration object to override the service's default settings.
|
|
484
|
+
If not provided, default values are used.
|
|
230
485
|
|
|
231
486
|
Raises:
|
|
232
|
-
ValueError: If more than one circuit is submitted for a CIRCUIT_CUT job
|
|
487
|
+
ValueError: If more than one circuit is submitted for a CIRCUIT_CUT job,
|
|
488
|
+
or if any circuit is not valid QASM.
|
|
489
|
+
requests.exceptions.HTTPError: If any API request fails.
|
|
233
490
|
|
|
234
491
|
Returns:
|
|
235
|
-
str
|
|
236
|
-
The job ID(s) of the created job(s). Returns a single job ID if only one job is created,
|
|
237
|
-
otherwise returns a list of job IDs if the circuits are split into multiple jobs due to payload size.
|
|
492
|
+
str: The job ID for the created job.
|
|
238
493
|
"""
|
|
494
|
+
# Create final job configuration by layering configurations:
|
|
495
|
+
# service defaults -> user overrides
|
|
496
|
+
if override_config:
|
|
497
|
+
config = self.config.override(override_config)
|
|
498
|
+
job_config = self._resolve_and_validate_qpu_system(config)
|
|
499
|
+
else:
|
|
500
|
+
job_config = self.config
|
|
501
|
+
|
|
502
|
+
# Handle Hamiltonian operators: validate compatibility and auto-infer job type
|
|
503
|
+
if ham_ops is not None:
|
|
504
|
+
# Validate that if job_type is explicitly set, it must be EXPECTATION
|
|
505
|
+
if job_type is not None and job_type != JobType.EXPECTATION:
|
|
506
|
+
raise ValueError(
|
|
507
|
+
"Hamiltonian operators are only supported for EXPECTATION job type."
|
|
508
|
+
)
|
|
509
|
+
# Auto-infer job type if not explicitly set
|
|
510
|
+
if job_type is None:
|
|
511
|
+
job_type = JobType.EXPECTATION
|
|
512
|
+
|
|
513
|
+
# Validate observables format
|
|
514
|
+
|
|
515
|
+
terms = ham_ops.split(";")
|
|
516
|
+
if len(terms) == 0:
|
|
517
|
+
raise ValueError(
|
|
518
|
+
"Hamiltonian operators must be non-empty semicolon-separated strings."
|
|
519
|
+
)
|
|
520
|
+
ham_ops_length = len(terms[0])
|
|
521
|
+
if not all(len(term) == ham_ops_length for term in terms):
|
|
522
|
+
raise ValueError("All Hamiltonian operators must have the same length.")
|
|
523
|
+
# Validate that each term only contains I, X, Y, Z
|
|
524
|
+
valid_paulis = {"I", "X", "Y", "Z"}
|
|
525
|
+
if not all(all(c in valid_paulis for c in term) for term in terms):
|
|
526
|
+
raise ValueError(
|
|
527
|
+
"Hamiltonian operators must contain only I, X, Y, Z characters."
|
|
528
|
+
)
|
|
239
529
|
|
|
530
|
+
if job_type is None:
|
|
531
|
+
job_type = JobType.SIMULATE
|
|
532
|
+
|
|
533
|
+
# Validate circuits
|
|
240
534
|
if job_type == JobType.CIRCUIT_CUT and len(circuits) > 1:
|
|
241
535
|
raise ValueError("Only one circuit allowed for circuit-cutting jobs.")
|
|
242
536
|
|
|
243
537
|
for key, circuit in circuits.items():
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
circuit_chunks = self._split_circuits(circuits)
|
|
538
|
+
result = is_valid_qasm(circuit)
|
|
539
|
+
if isinstance(result, str):
|
|
540
|
+
raise ValueError(f"Circuit '{key}' is not a valid QASM: {result}")
|
|
248
541
|
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
"tag": tag,
|
|
542
|
+
# Initialize the job without circuits to get a job_id
|
|
543
|
+
init_payload = {
|
|
544
|
+
"tag": job_config.tag,
|
|
252
545
|
"job_type": job_type.value,
|
|
253
|
-
"qpu_system_name":
|
|
254
|
-
|
|
255
|
-
override_circuit_packing
|
|
256
|
-
if override_circuit_packing is not None
|
|
257
|
-
else self.use_circuit_packing
|
|
546
|
+
"qpu_system_name": (
|
|
547
|
+
job_config.qpu_system.name if job_config.qpu_system else None
|
|
258
548
|
),
|
|
549
|
+
"use_packing": job_config.use_circuit_packing or False,
|
|
259
550
|
}
|
|
260
551
|
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
552
|
+
init_response = self._make_request(
|
|
553
|
+
"post", "job/init/", json=init_payload, timeout=100
|
|
554
|
+
)
|
|
555
|
+
if init_response.status_code not in [HTTPStatus.OK, HTTPStatus.CREATED]:
|
|
556
|
+
_raise_with_details(init_response)
|
|
557
|
+
job_id = init_response.json()["job_id"]
|
|
264
558
|
|
|
265
|
-
|
|
559
|
+
# Split circuits and add them to the created job
|
|
560
|
+
circuit_chunks = self._split_circuits(circuits)
|
|
561
|
+
num_chunks = len(circuit_chunks)
|
|
562
|
+
|
|
563
|
+
for i, chunk in enumerate(circuit_chunks):
|
|
564
|
+
is_last_chunk = i == num_chunks - 1
|
|
565
|
+
add_circuits_payload = {
|
|
566
|
+
"circuits": chunk,
|
|
567
|
+
"mode": "append",
|
|
568
|
+
"finalized": "true" if is_last_chunk else "false",
|
|
569
|
+
}
|
|
570
|
+
|
|
571
|
+
# Include shots/ham_ops in add_circuits payload
|
|
572
|
+
if ham_ops is not None:
|
|
573
|
+
add_circuits_payload["observables"] = ham_ops
|
|
574
|
+
else:
|
|
575
|
+
add_circuits_payload["shots"] = job_config.shots
|
|
576
|
+
|
|
577
|
+
add_circuits_response = self._make_request(
|
|
266
578
|
"post",
|
|
267
|
-
"job/",
|
|
268
|
-
json=
|
|
579
|
+
f"job/{job_id}/add_circuits/",
|
|
580
|
+
json=add_circuits_payload,
|
|
269
581
|
timeout=100,
|
|
270
582
|
)
|
|
583
|
+
if add_circuits_response.status_code != HTTPStatus.OK:
|
|
584
|
+
_raise_with_details(add_circuits_response)
|
|
271
585
|
|
|
272
|
-
|
|
273
|
-
job_ids.append(response.json()["job_id"])
|
|
274
|
-
else:
|
|
275
|
-
_raise_with_details(response)
|
|
276
|
-
|
|
277
|
-
return job_ids if len(job_ids) > 1 else job_ids[0]
|
|
586
|
+
return job_id
|
|
278
587
|
|
|
279
|
-
def delete_job(self,
|
|
588
|
+
def delete_job(self, job_id: str) -> requests.Response:
|
|
280
589
|
"""
|
|
281
590
|
Delete a job from the Qoro Database.
|
|
282
591
|
|
|
283
592
|
Args:
|
|
284
|
-
job_id: The ID of the
|
|
593
|
+
job_id: The ID of the job to be deleted.
|
|
285
594
|
Returns:
|
|
286
|
-
|
|
595
|
+
requests.Response: The response from the API.
|
|
287
596
|
"""
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
"delete",
|
|
294
|
-
f"job/{job_id}",
|
|
295
|
-
timeout=50,
|
|
296
|
-
)
|
|
297
|
-
for job_id in job_ids
|
|
298
|
-
]
|
|
299
|
-
|
|
300
|
-
return responses if len(responses) > 1 else responses[0]
|
|
597
|
+
return self._make_request(
|
|
598
|
+
"delete",
|
|
599
|
+
f"job/{job_id}",
|
|
600
|
+
timeout=50,
|
|
601
|
+
)
|
|
301
602
|
|
|
302
|
-
def get_job_results(self,
|
|
603
|
+
def get_job_results(self, job_id: str) -> list[dict]:
|
|
303
604
|
"""
|
|
304
605
|
Get the results of a job from the Qoro Database.
|
|
305
606
|
|
|
306
607
|
Args:
|
|
307
|
-
job_id: The ID of the job to get results from
|
|
608
|
+
job_id: The ID of the job to get results from.
|
|
308
609
|
Returns:
|
|
309
|
-
|
|
610
|
+
list[dict]: The results of the job, with histograms decoded.
|
|
310
611
|
"""
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
responses = [
|
|
315
|
-
self._make_request(
|
|
612
|
+
try:
|
|
613
|
+
response = self._make_request(
|
|
316
614
|
"get",
|
|
317
|
-
f"job/{job_id}/
|
|
615
|
+
f"job/{job_id}/resultsV2/?limit=100&offset=0",
|
|
318
616
|
timeout=100,
|
|
319
617
|
)
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
raise requests.exceptions.HTTPError(
|
|
336
|
-
f"{response.status_code}: {response.reason}"
|
|
337
|
-
)
|
|
618
|
+
except requests.exceptions.HTTPError as e:
|
|
619
|
+
# Provide a more specific error message for 400 Bad Request
|
|
620
|
+
if e.response.status_code == HTTPStatus.BAD_REQUEST:
|
|
621
|
+
raise requests.exceptions.HTTPError(
|
|
622
|
+
"400 Bad Request: Job results not available, likely job is still running"
|
|
623
|
+
) from e
|
|
624
|
+
# Re-raise any other HTTP error
|
|
625
|
+
raise e
|
|
626
|
+
|
|
627
|
+
# If the request was successful, process the data
|
|
628
|
+
data = response.json()
|
|
629
|
+
|
|
630
|
+
for result in data["results"]:
|
|
631
|
+
result["results"] = _decode_qh1_b64(result["results"])
|
|
632
|
+
return data["results"]
|
|
338
633
|
|
|
339
634
|
def poll_job_status(
|
|
340
635
|
self,
|
|
341
|
-
|
|
636
|
+
job_id: str,
|
|
342
637
|
loop_until_complete: bool = False,
|
|
343
|
-
on_complete: Callable | None = None,
|
|
638
|
+
on_complete: Callable[[requests.Response], None] | None = None,
|
|
344
639
|
verbose: bool = True,
|
|
345
640
|
poll_callback: Callable[[int, str], None] | None = None,
|
|
346
|
-
):
|
|
641
|
+
) -> str | JobStatus:
|
|
347
642
|
"""
|
|
348
|
-
Get the status of a job and optionally execute function
|
|
349
|
-
if the status is COMPLETE.
|
|
643
|
+
Get the status of a job and optionally execute a function on completion.
|
|
350
644
|
|
|
351
645
|
Args:
|
|
352
|
-
|
|
353
|
-
loop_until_complete (bool):
|
|
354
|
-
on_complete (optional): A function to
|
|
355
|
-
|
|
356
|
-
verbose (optional):
|
|
357
|
-
poll_callback (optional): A function for updating progress bars
|
|
358
|
-
|
|
646
|
+
job_id: The ID of the job to check.
|
|
647
|
+
loop_until_complete (bool): If True, polls until the job is complete or failed.
|
|
648
|
+
on_complete (Callable, optional): A function to call with the final response
|
|
649
|
+
object when the job finishes.
|
|
650
|
+
verbose (bool, optional): If True, prints polling status to the logger.
|
|
651
|
+
poll_callback (Callable, optional): A function for updating progress bars.
|
|
652
|
+
Takes `(retry_count, status)`.
|
|
653
|
+
|
|
359
654
|
Returns:
|
|
360
|
-
|
|
655
|
+
str | JobStatus: The current job status as a string if not looping,
|
|
656
|
+
or a JobStatus enum member (COMPLETED or FAILED) if looping.
|
|
361
657
|
"""
|
|
362
|
-
|
|
363
|
-
job_ids = [job_ids]
|
|
364
|
-
|
|
365
|
-
# Decide once at the start
|
|
658
|
+
# Decide once at the start which update function to use
|
|
366
659
|
if poll_callback:
|
|
367
660
|
update_fn = poll_callback
|
|
368
661
|
elif verbose:
|
|
@@ -370,55 +663,31 @@ class QoroService(CircuitRunner):
|
|
|
370
663
|
RESET = "\033[0m"
|
|
371
664
|
|
|
372
665
|
update_fn = lambda retry_count, status: logger.info(
|
|
373
|
-
rf"Job {CYAN}{
|
|
666
|
+
rf"Job {CYAN}{job_id.split('-')[0]}{RESET} is {status}. Polling attempt {retry_count} / {self.max_retries}\r",
|
|
374
667
|
extra={"append": True},
|
|
375
668
|
)
|
|
376
669
|
else:
|
|
377
670
|
update_fn = lambda _, __: None
|
|
378
671
|
|
|
379
672
|
if not loop_until_complete:
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
f"job/{job_id}/status/",
|
|
384
|
-
timeout=200,
|
|
385
|
-
).json()["status"]
|
|
386
|
-
for job_id in job_ids
|
|
387
|
-
]
|
|
388
|
-
return statuses if len(statuses) > 1 else statuses[0]
|
|
389
|
-
|
|
390
|
-
pending_job_ids = set(job_ids)
|
|
391
|
-
responses = []
|
|
673
|
+
response = self._make_request("get", f"job/{job_id}/status/", timeout=200)
|
|
674
|
+
return response.json()["status"]
|
|
675
|
+
|
|
392
676
|
for retry_count in range(1, self.max_retries + 1):
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
break
|
|
396
|
-
|
|
397
|
-
for job_id in list(pending_job_ids):
|
|
398
|
-
response = self._make_request(
|
|
399
|
-
"get",
|
|
400
|
-
f"job/{job_id}/status/",
|
|
401
|
-
timeout=200,
|
|
402
|
-
)
|
|
677
|
+
response = self._make_request("get", f"job/{job_id}/status/", timeout=200)
|
|
678
|
+
status = response.json()["status"]
|
|
403
679
|
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
pending_job_ids.remove(job_id)
|
|
409
|
-
responses.append(response)
|
|
680
|
+
if status == JobStatus.COMPLETED.value:
|
|
681
|
+
if on_complete:
|
|
682
|
+
on_complete(response)
|
|
683
|
+
return JobStatus.COMPLETED
|
|
410
684
|
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
685
|
+
if status == JobStatus.FAILED.value:
|
|
686
|
+
if on_complete:
|
|
687
|
+
on_complete(response)
|
|
688
|
+
return JobStatus.FAILED
|
|
414
689
|
|
|
690
|
+
update_fn(retry_count, status)
|
|
415
691
|
time.sleep(self.polling_interval)
|
|
416
692
|
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
if not pending_job_ids:
|
|
420
|
-
if on_complete:
|
|
421
|
-
on_complete(responses)
|
|
422
|
-
return JobStatus.COMPLETED
|
|
423
|
-
else:
|
|
424
|
-
raise MaxRetriesReachedError(retry_count)
|
|
693
|
+
raise MaxRetriesReachedError(job_id, self.max_retries)
|