qoro-divi 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of qoro-divi might be problematic. Click here for more details.
- divi/__init__.py +1 -2
- divi/backends/__init__.py +7 -0
- divi/backends/_circuit_runner.py +46 -0
- divi/{parallel_simulator.py → backends/_parallel_simulator.py} +136 -53
- divi/backends/_qoro_service.py +531 -0
- divi/circuits/__init__.py +5 -0
- divi/circuits/_core.py +226 -0
- divi/{qasm.py → circuits/qasm.py} +21 -2
- divi/{exp → extern}/cirq/_validator.py +9 -7
- divi/qprog/__init__.py +18 -5
- divi/qprog/algorithms/__init__.py +14 -0
- divi/qprog/algorithms/_ansatze.py +311 -0
- divi/qprog/{_qaoa.py → algorithms/_qaoa.py} +69 -41
- divi/qprog/{_vqe.py → algorithms/_vqe.py} +79 -135
- divi/qprog/batch.py +239 -55
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +219 -18
- divi/qprog/quantum_program.py +389 -57
- divi/qprog/workflows/__init__.py +10 -0
- divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +3 -34
- divi/qprog/{_qubo_partitioning.py → workflows/_qubo_partitioning.py} +42 -25
- divi/qprog/{_vqe_sweep.py → workflows/_vqe_sweep.py} +59 -26
- divi/reporting/__init__.py +7 -0
- divi/reporting/_pbar.py +112 -0
- divi/{qlogger.py → reporting/_qlogger.py} +37 -2
- divi/{reporter.py → reporting/_reporter.py} +8 -14
- divi/utils.py +49 -10
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/METADATA +2 -1
- qoro_divi-0.3.5.dist-info/RECORD +69 -0
- divi/_pbar.py +0 -70
- divi/circuits.py +0 -139
- divi/interfaces.py +0 -25
- divi/qoro_service.py +0 -425
- qoro_divi-0.3.3.dist-info/RECORD +0 -62
- /divi/{qpu_system.py → backends/_qpu_system.py} +0 -0
- /divi/{qem.py → circuits/qem.py} +0 -0
- /divi/{exp → extern}/cirq/__init__.py +0 -0
- /divi/{exp → extern}/cirq/_lexer.py +0 -0
- /divi/{exp → extern}/cirq/_parser.py +0 -0
- /divi/{exp → extern}/cirq/_qasm_export.py +0 -0
- /divi/{exp → extern}/cirq/_qasm_import.py +0 -0
- /divi/{exp → extern}/cirq/exception.py +0 -0
- /divi/{exp → extern}/scipy/_cobyla.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/LICENCE.txt +0 -0
- /divi/{exp → extern}/scipy/pyprima/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/cobyla.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/cobylb.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/geometry.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/initialize.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/trustregion.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/update.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_bounds.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_linear_constraints.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_nonlinear_constraints.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_project.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/checkbreak.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/consts.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/evaluate.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/history.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/infos.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/linalg.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/message.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/powalg.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/preproc.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/present.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/ratio.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/redrho.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/selectx.py +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSE +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSES/.license-header +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,531 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import base64
|
|
6
|
+
import gzip
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import time
|
|
10
|
+
from collections.abc import Callable
|
|
11
|
+
from enum import Enum
|
|
12
|
+
from http import HTTPStatus
|
|
13
|
+
|
|
14
|
+
import requests
|
|
15
|
+
from dotenv import dotenv_values
|
|
16
|
+
from requests.adapters import HTTPAdapter, Retry
|
|
17
|
+
|
|
18
|
+
from divi.backends import CircuitRunner
|
|
19
|
+
from divi.backends._qpu_system import QPU, QPUSystem
|
|
20
|
+
from divi.extern.cirq import is_valid_qasm
|
|
21
|
+
|
|
22
|
+
# Base URL for all Qoro API endpoints.
API_URL = "https://app.qoroquantum.net/api"
# Circuit-payload chunking threshold in megabytes (kept just under 1 MB).
_MAX_PAYLOAD_SIZE_MB = 0.95

# Module-level HTTP session shared by every QoroService instance so that
# connection pooling and the retry policy below apply to all requests.
session = requests.Session()
retry_configuration = Retry(
    total=5,
    backoff_factor=0.1,
    status_forcelist=[502],  # retry only on Bad Gateway responses
    allowed_methods=["GET", "POST", "DELETE"],
)

session.mount("http://", HTTPAdapter(max_retries=retry_configuration))
session.mount("https://", HTTPAdapter(max_retries=retry_configuration))

logger = logging.getLogger(__name__)

# QPU system used when the caller does not specify one.
_DEFAULT_QPU_SYSTEM = "qoro_maestro"
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _decode_qh1_b64(encoded: dict) -> dict[str, int]:
    """
    Decode a {'encoding':'qh1','n_bits':N,'payload':base64} histogram
    into a dict mapping bitstring keys to integer counts.

    Returns {} when there is no payload to decode.
    """
    payload = encoded.get("payload") if encoded else None
    if not payload:
        return {}

    if encoded.get("encoding") != "qh1":
        raise ValueError(f"Unsupported encoding: {encoded.get('encoding')}")

    raw = base64.b64decode(encoded["payload"])
    decoded = _decompress_histogram(raw)
    return {str(key): count for key, count in decoded.items()}
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def _uleb128_decode(data: bytes, pos: int = 0) -> tuple[int, int]:
|
|
60
|
+
x = 0
|
|
61
|
+
shift = 0
|
|
62
|
+
while True:
|
|
63
|
+
if pos >= len(data):
|
|
64
|
+
raise ValueError("truncated varint")
|
|
65
|
+
b = data[pos]
|
|
66
|
+
pos += 1
|
|
67
|
+
x |= (b & 0x7F) << shift
|
|
68
|
+
if (b & 0x80) == 0:
|
|
69
|
+
break
|
|
70
|
+
shift += 7
|
|
71
|
+
return x, pos
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _int_to_bitstr(x: int, n_bits: int) -> str:
|
|
75
|
+
return format(x, f"0{n_bits}b")
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _rle_bool_decode(data: bytes, pos=0) -> tuple[list[bool], int]:
    """Decode a run-length-encoded boolean sequence.

    Layout: a varint run count, one byte giving the first run's value,
    then one varint length per run; run values alternate thereafter.
    Returns the expanded booleans and the offset just past the stream.
    """
    run_count, pos = _uleb128_decode(data, pos)
    if not run_count:
        return [], pos

    current = data[pos] != 0
    pos += 1

    decoded: list[bool] = []
    for _ in range(run_count):
        run_len, pos = _uleb128_decode(data, pos)
        decoded += [current] * run_len
        current = not current
    return decoded, pos
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _decompress_histogram(buf: bytes) -> dict[str, int]:
    """Decode the binary 'QH1' histogram stream into {bitstring: count}.

    Stream layout (integers are ULEB128 unless noted): magic b"QH1",
    one raw byte for n_bits, unique-state count, total shot count,
    gap-encoded state indices (first gap absolute, rest deltas), an
    RLE boolean bitmap flagging states whose count is exactly 1, and
    one (count - 2) value for each remaining state.
    """
    if not buf:
        return {}

    cursor = 0
    if buf[cursor : cursor + 3] != b"QH1":
        raise ValueError("bad magic")
    cursor += 3
    n_bits = buf[cursor]
    cursor += 1

    unique, cursor = _uleb128_decode(buf, cursor)
    total_shots, cursor = _uleb128_decode(buf, cursor)

    # State indices arrive gap-encoded; rebuild the running values.
    num_gaps, cursor = _uleb128_decode(buf, cursor)
    deltas = []
    for _ in range(num_gaps):
        delta, cursor = _uleb128_decode(buf, cursor)
        deltas.append(delta)

    indices, running = [], 0
    for position, delta in enumerate(deltas):
        running = delta if position == 0 else running + delta
        indices.append(running)

    # Bitmap: True where the state's count is exactly 1.
    bitmap_len, cursor = _uleb128_decode(buf, cursor)
    single_flags, _ = _rle_bool_decode(buf[cursor : cursor + bitmap_len], 0)
    cursor += bitmap_len

    # Counts >= 2 are stored as (count - 2).
    n_extras, cursor = _uleb128_decode(buf, cursor)
    extras = []
    for _ in range(n_extras):
        extra, cursor = _uleb128_decode(buf, cursor)
        extras.append(extra)

    extra_iter = iter(extras)
    counts = [1 if flagged else next(extra_iter) + 2 for flagged in single_flags]

    hist = {_int_to_bitstr(idx, n_bits): cnt for idx, cnt in zip(indices, counts)}

    # optional integrity check
    if sum(counts) != total_shots:
        raise ValueError("corrupt stream: shot sum mismatch")
    if len(counts) != unique:
        raise ValueError("corrupt stream: unique mismatch")
    return hist
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def _raise_with_details(resp: requests.Response):
    """Raise an HTTPError whose message carries the response body.

    The body is re-serialized JSON when possible, raw text otherwise,
    so API error details are not lost when the exception surfaces.
    """
    try:
        body = json.dumps(resp.json(), ensure_ascii=False)
    except ValueError:
        # Response was not valid JSON; fall back to the raw text.
        body = resp.text
    raise requests.HTTPError(f"{resp.status_code} {resp.reason}: {body}")
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class JobStatus(Enum):
    """Lifecycle states reported by the Qoro API for a submitted job."""

    PENDING = "PENDING"
    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
    CANCELLED = "CANCELLED"
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
class JobType(Enum):
    """Kinds of jobs the Qoro API can run for a circuit submission."""

    EXECUTE = "EXECUTE"
    SIMULATE = "SIMULATE"
    ESTIMATE = "ESTIMATE"
    CIRCUIT_CUT = "CIRCUIT_CUT"
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class MaxRetriesReachedError(Exception):
    """Raised when status polling exhausts every allowed retry attempt."""

    def __init__(self, retries):
        # Keep both the raw retry count and the formatted message on the
        # instance so callers can report either.
        message = f"Maximum retries reached: {retries} retries attempted"
        self.retries = retries
        self.message = message
        super().__init__(message)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _parse_qpu_systems(json_data: list) -> list[QPUSystem]:
    """Build QPUSystem objects from the API's JSON list of systems."""
    systems = []
    for entry in json_data:
        qpus = [QPU(**spec) for spec in entry.get("qpus", [])]
        systems.append(
            QPUSystem(
                name=entry["name"],
                qpus=qpus,
                access_level=entry["access_level"],
            )
        )
    return systems
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
class QoroService(CircuitRunner):
    """Client for submitting quantum circuits to the Qoro API and polling results."""

    def __init__(
        self,
        auth_token: str | None = None,
        polling_interval: float = 3.0,
        max_retries: int = 5000,
        shots: int = 1000,
        qpu_system_name: str | QPUSystem | None = None,
        use_circuit_packing: bool = False,
    ):
        """
        Args:
            auth_token: API token. If None, it is read from ``QORO_API_KEY``
                in a .env file.
            polling_interval: Seconds to wait between job-status polls.
            max_retries: Maximum number of status polls before giving up.
            shots: Number of shots per circuit execution.
            qpu_system_name: QPU system to target; falls back to the
                module default when None.
            use_circuit_packing: Default for circuit-packing optimization.

        Raises:
            ValueError: If no auth token is provided and none is found in
                a .env file.
        """
        super().__init__(shots=shots)

        if auth_token is None:
            try:
                auth_token = dotenv_values()["QORO_API_KEY"]
            except KeyError:
                # `from None` hides the uninformative KeyError chain.
                raise ValueError(
                    "Qoro API key not provided nor found in a .env file."
                ) from None

        self.auth_token = "Bearer " + auth_token
        self.polling_interval = polling_interval
        self.max_retries = max_retries
        if qpu_system_name is None:
            qpu_system_name = _DEFAULT_QPU_SYSTEM
        self._qpu_system_name = qpu_system_name
        self.use_circuit_packing = use_circuit_packing

    @property
    def qpu_system_name(self) -> str | QPUSystem | None:
        return self._qpu_system_name

    @qpu_system_name.setter
    def qpu_system_name(self, system_name: str | QPUSystem | None):
        """
        Set the QPU system for the service.

        Args:
            system_name (str | QPUSystem): The QPU system to set or the name as a string.

        Raises:
            TypeError: If `system_name` is not a str, QPUSystem, or None.
        """
        if isinstance(system_name, str):
            self._qpu_system_name = system_name
        elif isinstance(system_name, QPUSystem):
            self._qpu_system_name = system_name.name
        elif system_name is None:
            self._qpu_system_name = None
        else:
            # BUG FIX: this raise was previously unconditional (not in an
            # `else`), so every assignment raised even after succeeding.
            raise TypeError("Expected a QPUSystem instance or str.")

    def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
        """
        Make an authenticated HTTP request to the Qoro API.

        This internal method centralizes all API communication, handling authentication
        headers and error responses consistently.

        Args:
            method (str): HTTP method to use (e.g., 'get', 'post', 'delete').
            endpoint (str): API endpoint path (without base URL).
            **kwargs: Additional arguments to pass to requests.request(), such as
                'json', 'timeout', 'params', etc.

        Returns:
            requests.Response: The HTTP response object from the API.

        Raises:
            requests.exceptions.HTTPError: If the response status code is 400 or above.
        """
        url = f"{API_URL}/{endpoint}"

        headers = {"Authorization": self.auth_token}

        if method.upper() in ["POST", "PUT", "PATCH"]:
            headers["Content-Type"] = "application/json"

        # Allow overriding default headers
        if "headers" in kwargs:
            headers.update(kwargs.pop("headers"))

        response = session.request(method, url, headers=headers, **kwargs)

        # Generic error handling for non-OK statuses
        if response.status_code >= 400:
            # BUG FIX: attach the response so handlers (e.g. get_job_results)
            # can inspect `e.response.status_code`; previously e.response was None.
            raise requests.exceptions.HTTPError(
                f"API Error: {response.status_code} {response.reason} for URL {response.url}",
                response=response,
            )

        return response

    def test_connection(self):
        """
        Test the connection to the Qoro API.

        Sends a simple GET request to verify that the API is reachable and
        the authentication token is valid.

        Returns:
            requests.Response: The response from the API ping endpoint.

        Raises:
            requests.exceptions.HTTPError: If the connection fails or authentication
                is invalid.
        """
        return self._make_request("get", "", timeout=10)

    def fetch_qpu_systems(self) -> list[QPUSystem]:
        """
        Get the list of available QPU systems from the Qoro API.

        Returns:
            List of QPUSystem objects.
        """
        response = self._make_request("get", "qpusystem/", timeout=10)
        return _parse_qpu_systems(response.json())

    @staticmethod
    def _compress_data(value) -> bytes:
        """Gzip-compress a string and return it base64-encoded as a str."""
        return base64.b64encode(gzip.compress(value.encode("utf-8"))).decode("utf-8")

    def _split_circuits(self, circuits: dict[str, str]) -> list[dict[str, str]]:
        """
        Splits circuits into chunks by estimating payload size with a simplified,
        consistent overhead calculation.
        Assumes that BASE64 encoding produces ASCII characters, which are 1 byte each.
        """
        max_payload_bytes = _MAX_PAYLOAD_SIZE_MB * 1024 * 1024
        circuit_chunks = []
        current_chunk = {}

        # Start with size 2 for the opening and closing curly braces '{}'
        current_chunk_size_bytes = 2

        for key, value in circuits.items():
            compressed_value = self._compress_data(value)

            # +6 approximates JSON overhead per entry: quotes, colon, comma.
            item_size_bytes = len(key) + len(compressed_value) + 6

            # If adding this item would exceed the limit, finalize the current chunk.
            # This check only runs if the chunk is not empty.
            if current_chunk and (
                current_chunk_size_bytes + item_size_bytes > max_payload_bytes
            ):
                circuit_chunks.append(current_chunk)

                # Start a new chunk
                current_chunk = {}
                current_chunk_size_bytes = 2

            # Add the new item to the current chunk and update its size
            current_chunk[key] = compressed_value
            current_chunk_size_bytes += item_size_bytes

        # Add the last remaining chunk if it's not empty
        if current_chunk:
            circuit_chunks.append(current_chunk)

        return circuit_chunks

    def submit_circuits(
        self,
        circuits: dict[str, str],
        tag: str = "default",
        job_type: JobType = JobType.SIMULATE,
        qpu_system_name: str | None = None,
        override_circuit_packing: bool | None = None,
    ) -> str:
        """
        Submit quantum circuits to the Qoro API for execution.

        This method first initializes a job and then sends the circuits in
        one or more chunks, associating them all with a single job ID.

        Args:
            circuits (dict[str, str]):
                Dictionary mapping unique circuit IDs to QASM circuit strings.
            tag (str, optional):
                Tag to associate with the job for identification. Defaults to "default".
            job_type (JobType, optional):
                Type of job to execute (e.g., SIMULATE, EXECUTE, ESTIMATE, CIRCUIT_CUT).
                Defaults to JobType.SIMULATE.
            qpu_system_name (str | None, optional):
                The name of the QPU system to use. Overrides the service's default.
            override_circuit_packing (bool | None, optional):
                Whether to use circuit packing optimization. Overrides the service's default.

        Raises:
            ValueError: If more than one circuit is submitted for a CIRCUIT_CUT job,
                or if any circuit is not valid QASM.
            requests.exceptions.HTTPError: If any API request fails.

        Returns:
            str: The job ID for the created job.
        """
        if job_type == JobType.CIRCUIT_CUT and len(circuits) > 1:
            raise ValueError("Only one circuit allowed for circuit-cutting jobs.")

        for key, circuit in circuits.items():
            if not (err := is_valid_qasm(circuit)):
                raise ValueError(f"Circuit '{key}' is not a valid QASM: {err}")

        # 1. Initialize the job without circuits to get a job_id
        init_payload = {
            "shots": self.shots,
            "tag": tag,
            "job_type": job_type.value,
            "qpu_system_name": qpu_system_name or self.qpu_system_name,
            "use_packing": (
                override_circuit_packing
                if override_circuit_packing is not None
                else self.use_circuit_packing
            ),
        }

        init_response = self._make_request(
            "post", "job/init/", json=init_payload, timeout=100
        )
        if init_response.status_code not in [HTTPStatus.OK, HTTPStatus.CREATED]:
            _raise_with_details(init_response)
        job_id = init_response.json()["job_id"]

        # 2. Split circuits and add them to the created job
        circuit_chunks = self._split_circuits(circuits)
        num_chunks = len(circuit_chunks)

        for i, chunk in enumerate(circuit_chunks):
            is_last_chunk = i == num_chunks - 1
            add_circuits_payload = {
                "circuits": chunk,
                "shots": self.shots,
                "mode": "append",
                "finalized": "true" if is_last_chunk else "false",
            }

            add_circuits_response = self._make_request(
                "post",
                f"job/{job_id}/add_circuits/",
                json=add_circuits_payload,
                timeout=100,
            )
            if add_circuits_response.status_code != HTTPStatus.OK:
                _raise_with_details(add_circuits_response)

        return job_id

    def delete_job(self, job_id: str) -> requests.Response:
        """
        Delete a job from the Qoro Database.

        Args:
            job_id: The ID of the job to be deleted.
        Returns:
            requests.Response: The response from the API.
        """
        return self._make_request(
            "delete",
            f"job/{job_id}",
            timeout=50,
        )

    def get_job_results(self, job_id: str) -> list[dict]:
        """
        Get the results of a job from the Qoro Database.

        Args:
            job_id: The ID of the job to get results from.
        Returns:
            list[dict]: The results of the job, with histograms decoded.
        """
        try:
            response = self._make_request(
                "get",
                f"job/{job_id}/resultsV2/?limit=100&offset=0",
                timeout=100,
            )
        except requests.exceptions.HTTPError as e:
            # Provide a more specific error message for 400 Bad Request
            if e.response is not None and e.response.status_code == HTTPStatus.BAD_REQUEST:
                raise requests.exceptions.HTTPError(
                    "400 Bad Request: Job results not available, likely job is still running"
                ) from e
            # Re-raise any other HTTP error
            raise e

        # If the request was successful, process the data
        data = response.json()
        for result in data["results"]:
            result["results"] = _decode_qh1_b64(result["results"])
        return data["results"]

    def poll_job_status(
        self,
        job_id: str,
        loop_until_complete: bool = False,
        on_complete: Callable[[requests.Response], None] | None = None,
        verbose: bool = True,
        poll_callback: Callable[[int, str], None] | None = None,
    ) -> str | JobStatus:
        """
        Get the status of a job and optionally execute a function on completion.

        Args:
            job_id: The ID of the job to check.
            loop_until_complete (bool): If True, polls until the job is complete or failed.
            on_complete (Callable, optional): A function to call with the final response
                object when the job finishes.
            verbose (bool, optional): If True, prints polling status to the logger.
            poll_callback (Callable, optional): A function for updating progress bars.
                Takes `(retry_count, status)`.

        Returns:
            str | JobStatus: The current job status as a string if not looping,
                or a JobStatus enum member (COMPLETED or FAILED) if looping.

        Raises:
            MaxRetriesReachedError: If the job does not finish within
                `max_retries` polls.
        """
        # Decide once at the start which update function to use
        if poll_callback:
            update_fn = poll_callback
        elif verbose:
            CYAN = "\033[36m"
            RESET = "\033[0m"

            def update_fn(retry_count, status):
                logger.info(
                    rf"Job {CYAN}{job_id.split('-')[0]}{RESET} is {status}. Polling attempt {retry_count} / {self.max_retries}\r",
                    extra={"append": True},
                )

        else:

            def update_fn(_, __):
                return None

        if not loop_until_complete:
            response = self._make_request("get", f"job/{job_id}/status/", timeout=200)
            return response.json()["status"]

        for retry_count in range(1, self.max_retries + 1):
            response = self._make_request("get", f"job/{job_id}/status/", timeout=200)
            status = response.json()["status"]

            if status == JobStatus.COMPLETED.value:
                if on_complete:
                    on_complete(response)
                return JobStatus.COMPLETED

            if status == JobStatus.FAILED.value:
                if on_complete:
                    on_complete(response)
                return JobStatus.FAILED

            update_fn(retry_count, status)
            time.sleep(self.polling_interval)

        raise MaxRetriesReachedError(self.max_retries)