qoro-divi 0.2.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- divi/__init__.py +8 -0
- divi/_pbar.py +73 -0
- divi/circuits.py +139 -0
- divi/exp/cirq/__init__.py +7 -0
- divi/exp/cirq/_lexer.py +126 -0
- divi/exp/cirq/_parser.py +889 -0
- divi/exp/cirq/_qasm_export.py +37 -0
- divi/exp/cirq/_qasm_import.py +35 -0
- divi/exp/cirq/exception.py +21 -0
- divi/exp/scipy/_cobyla.py +342 -0
- divi/exp/scipy/pyprima/LICENCE.txt +28 -0
- divi/exp/scipy/pyprima/__init__.py +263 -0
- divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
- divi/exp/scipy/pyprima/cobyla/cobyla.py +599 -0
- divi/exp/scipy/pyprima/cobyla/cobylb.py +849 -0
- divi/exp/scipy/pyprima/cobyla/geometry.py +240 -0
- divi/exp/scipy/pyprima/cobyla/initialize.py +269 -0
- divi/exp/scipy/pyprima/cobyla/trustregion.py +540 -0
- divi/exp/scipy/pyprima/cobyla/update.py +331 -0
- divi/exp/scipy/pyprima/common/__init__.py +0 -0
- divi/exp/scipy/pyprima/common/_bounds.py +41 -0
- divi/exp/scipy/pyprima/common/_linear_constraints.py +46 -0
- divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +64 -0
- divi/exp/scipy/pyprima/common/_project.py +224 -0
- divi/exp/scipy/pyprima/common/checkbreak.py +107 -0
- divi/exp/scipy/pyprima/common/consts.py +48 -0
- divi/exp/scipy/pyprima/common/evaluate.py +101 -0
- divi/exp/scipy/pyprima/common/history.py +39 -0
- divi/exp/scipy/pyprima/common/infos.py +30 -0
- divi/exp/scipy/pyprima/common/linalg.py +452 -0
- divi/exp/scipy/pyprima/common/message.py +336 -0
- divi/exp/scipy/pyprima/common/powalg.py +131 -0
- divi/exp/scipy/pyprima/common/preproc.py +393 -0
- divi/exp/scipy/pyprima/common/present.py +5 -0
- divi/exp/scipy/pyprima/common/ratio.py +56 -0
- divi/exp/scipy/pyprima/common/redrho.py +49 -0
- divi/exp/scipy/pyprima/common/selectx.py +346 -0
- divi/interfaces.py +25 -0
- divi/parallel_simulator.py +258 -0
- divi/qasm.py +220 -0
- divi/qem.py +191 -0
- divi/qlogger.py +119 -0
- divi/qoro_service.py +343 -0
- divi/qprog/__init__.py +13 -0
- divi/qprog/_graph_partitioning.py +619 -0
- divi/qprog/_mlae.py +182 -0
- divi/qprog/_qaoa.py +440 -0
- divi/qprog/_vqe.py +275 -0
- divi/qprog/_vqe_sweep.py +144 -0
- divi/qprog/batch.py +235 -0
- divi/qprog/optimizers.py +75 -0
- divi/qprog/quantum_program.py +493 -0
- divi/utils.py +116 -0
- qoro_divi-0.2.0b1.dist-info/LICENSE +190 -0
- qoro_divi-0.2.0b1.dist-info/LICENSES/Apache-2.0.txt +73 -0
- qoro_divi-0.2.0b1.dist-info/METADATA +57 -0
- qoro_divi-0.2.0b1.dist-info/RECORD +58 -0
- qoro_divi-0.2.0b1.dist-info/WHEEL +4 -0
divi/qoro_service.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import base64
|
|
6
|
+
import gzip
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import time
|
|
10
|
+
from collections.abc import Callable
|
|
11
|
+
from enum import Enum
|
|
12
|
+
from http import HTTPStatus
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
import requests
|
|
16
|
+
from requests.adapters import HTTPAdapter, Retry
|
|
17
|
+
|
|
18
|
+
from divi.interfaces import CircuitRunner
|
|
19
|
+
|
|
20
|
+
# Base endpoint for every Qoro API request made by this module.
API_URL = "https://app.qoroquantum.net/api"
# Upper bound (in MB, measured on the JSON-encoded request body) for a single
# job-submission payload; larger circuit batches get split into chunks.
MAX_PAYLOAD_SIZE_MB = 0.95

# Module-level session shared by all QoroService instances so that
# connection pooling and the retry policy below apply to every request.
session = requests.Session()
retries = Retry(
    total=5,
    backoff_factor=0.1,
    # Only retry on 502 Bad Gateway; other statuses surface to the caller.
    status_forcelist=[502],
    # DELETE is included because job deletion is idempotent on the server.
    allowed_methods=["GET", "POST", "DELETE"],
)

session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))

logger = logging.getLogger(__name__)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class JobStatus(Enum):
    """Lifecycle states reported by the Qoro API for a submitted job."""

    PENDING = "PENDING"
    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
    CANCELLED = "CANCELLED"
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class JobType(Enum):
    """Kinds of work the Qoro API can perform on submitted circuits."""

    EXECUTE = "EXECUTE"
    SIMULATE = "SIMULATE"
    ESTIMATE = "ESTIMATE"
    # Circuit-cutting jobs accept exactly one circuit per submission
    # (enforced in QoroService.submit_circuits).
    CIRCUIT_CUT = "CIRCUIT_CUT"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class MaxRetriesReachedError(Exception):
    """Raised once status polling has been retried the maximum number of times."""

    def __init__(self, retries, message="Maximum retries reached"):
        full_message = f"{message}: {retries} retries attempted"
        self.retries = retries
        self.message = full_message
        super().__init__(full_message)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class QoroService(CircuitRunner):
    """HTTP client for the Qoro Quantum API.

    Handles authentication, circuit submission (with gzip compression and
    automatic payload chunking), job deletion, result retrieval, and status
    polling, all through the shared module-level ``session``.
    """

    def __init__(
        self,
        auth_token: str,
        polling_interval: float = 3.0,
        max_retries: int = 5000,
        shots: int = 1000,
        use_circuit_packing: Optional[bool] = False,
    ):
        """
        Args:
            auth_token: API token, sent as a ``Bearer`` Authorization header.
            polling_interval: Seconds to sleep between status polls.
            max_retries: Maximum number of status polls before giving up.
            shots: Number of shots requested per circuit execution.
            use_circuit_packing: Default circuit-packing flag for submissions;
                can be overridden per call via ``override_circuit_packing``.
        """
        super().__init__(shots=shots)

        self.auth_token = "Bearer " + auth_token
        self.polling_interval = polling_interval
        self.max_retries = max_retries
        self.use_circuit_packing = use_circuit_packing

    def test_connection(self):
        """Test the connection to the Qoro API.

        Returns:
            The successful ``requests.Response``.

        Raises:
            requests.exceptions.HTTPError: If the API does not answer 200 OK.
        """
        response = session.get(
            API_URL, headers={"Authorization": self.auth_token}, timeout=10
        )

        if response.status_code != HTTPStatus.OK:
            raise requests.exceptions.HTTPError(
                f"Connection failed with error: {response.status_code}: {response.reason}"
            )

        return response

    def submit_circuits(
        self,
        circuits: dict[str, str],
        tag: str = "default",
        job_type: JobType = JobType.SIMULATE,
        override_circuit_packing: bool | None = None,
    ):
        """
        Submit quantum circuits to the Qoro API for execution.

        Args:
            circuits (dict[str, str]):
                Dictionary mapping unique circuit IDs to QASM circuit strings.
            tag (str, optional):
                Tag to associate with the job for identification. Defaults to "default".
            job_type (JobType, optional):
                Type of job to execute (e.g., SIMULATE, EXECUTE, ESTIMATE, CIRCUIT_CUT). Defaults to JobType.SIMULATE.
            override_circuit_packing (bool | None, optional):
                Per-call override for the instance-level ``use_circuit_packing``
                setting. When None, the instance default is used.

        Raises:
            ValueError: If no circuits are provided, or more than one circuit
                is submitted for a CIRCUIT_CUT job.
            requests.exceptions.HTTPError: If any chunk submission is rejected.

        Returns:
            str or list[str]:
                The job ID(s) of the created job(s). Returns a single job ID if only one job is created,
                otherwise returns a list of job IDs if the circuits are split into multiple jobs due to payload size.
        """
        # Fail fast with a clear message instead of an IndexError on
        # ``job_ids[0]`` at the end of the method.
        if not circuits:
            raise ValueError("At least one circuit must be provided.")

        if job_type == JobType.CIRCUIT_CUT and len(circuits) > 1:
            raise ValueError("Only one circuit allowed for circuit-cutting jobs.")

        def _compress_data(value) -> str:
            # gzip shrinks the QASM text; base64 keeps it JSON-serializable.
            # (Annotation fixed: the function returns a str, not bytes.)
            return base64.b64encode(gzip.compress(value.encode("utf-8"))).decode(
                "utf-8"
            )

        def _split_circuits(circuits: dict[str, str]) -> list[dict[str, str]]:
            """
            Split circuits into smaller chunks if the payload size exceeds the maximum allowed size.

            Args:
                circuits: Dictionary of circuits to be sent

            Returns:
                List of circuit chunks
            """

            def _estimate_size(data):
                # Approximate the eventual JSON payload size in MB.
                payload_json = json.dumps(data)
                return len(payload_json.encode("utf-8")) / 1024 / 1024

            circuit_chunks = []
            current_chunk = {}
            current_size = 0

            for key, value in circuits.items():
                compressed_value = _compress_data(value)
                estimated_size = _estimate_size({key: compressed_value})

                # Only flush a non-empty chunk. Previously, a first circuit
                # larger than the cap produced a leading empty chunk, which
                # would have been submitted as a job with no circuits.
                if (
                    current_chunk
                    and current_size + estimated_size > MAX_PAYLOAD_SIZE_MB
                ):
                    circuit_chunks.append(current_chunk)
                    current_chunk = {key: compressed_value}
                    current_size = estimated_size
                else:
                    current_chunk[key] = compressed_value
                    current_size += estimated_size

            if current_chunk:
                circuit_chunks.append(current_chunk)

            return circuit_chunks

        circuit_chunks = _split_circuits(circuits)

        job_ids = []
        for chunk in circuit_chunks:
            response = session.post(
                API_URL + "/job/",
                headers={
                    "Authorization": self.auth_token,
                    "Content-Type": "application/json",
                },
                json={
                    "circuits": chunk,
                    "shots": self.shots,
                    "tag": tag,
                    "job_type": job_type.value,
                    "use_packing": (
                        override_circuit_packing
                        if override_circuit_packing is not None
                        else self.use_circuit_packing
                    ),
                },
                timeout=100,
            )

            if response.status_code == HTTPStatus.CREATED:
                job_ids.append(response.json()["job_id"])
            else:
                raise requests.exceptions.HTTPError(
                    f"{response.status_code}: {response.reason}"
                )

        return job_ids if len(job_ids) > 1 else job_ids[0]

    def delete_job(self, job_ids):
        """
        Delete a job from the Qoro Database.

        Args:
            job_ids: A single job ID or a list of job IDs to be deleted.
        Returns:
            response: The response from the API (a list when several IDs were given).
        """
        if not isinstance(job_ids, list):
            job_ids = [job_ids]

        responses = []

        for job_id in job_ids:
            response = session.delete(
                API_URL + f"/job/{job_id}",
                headers={"Authorization": self.auth_token},
                timeout=50,
            )

            responses.append(response)

        return responses if len(responses) > 1 else responses[0]

    def get_job_results(self, job_ids):
        """
        Get the results of a job from the Qoro Database.

        Args:
            job_ids: A single job ID or a list of job IDs to fetch results for.
        Returns:
            results: The concatenated results of all jobs, once every job
                responded 200 OK.
        Raises:
            requests.exceptions.HTTPError: If any job answered 400 (results not
                ready, likely still running) or any other non-OK status.
        """
        if not isinstance(job_ids, list):
            job_ids = [job_ids]

        responses = []
        for job_id in job_ids:
            response = session.get(
                API_URL + f"/job/{job_id}/results",
                headers={"Authorization": self.auth_token},
                timeout=100,
            )
            responses.append(response)

        if all(response.status_code == HTTPStatus.OK for response in responses):
            responses = [response.json() for response in responses]
            # Each body is a list; flatten them into one combined result list.
            return sum(responses, [])
        elif any(
            response.status_code == HTTPStatus.BAD_REQUEST for response in responses
        ):
            raise requests.exceptions.HTTPError(
                "400 Bad Request: Job results not available, likely job is still running"
            )
        else:
            for response in responses:
                if response.status_code not in [HTTPStatus.OK, HTTPStatus.BAD_REQUEST]:
                    raise requests.exceptions.HTTPError(
                        f"{response.status_code}: {response.reason}"
                    )

    def poll_job_status(
        self,
        job_ids: str | list[str],
        loop_until_complete: bool = False,
        on_complete: Optional[Callable] = None,
        verbose: bool = True,
        pbar_update_fn: Optional[Callable] = None,
    ):
        """
        Get the status of a job and optionally execute function *on_complete* on the results
        if the status is COMPLETE.

        Args:
            job_ids: The job id of the jobs to check
            loop_until_complete (bool): A flag to loop until the job is completed
            on_complete (optional): A function to be called when the job is completed
            verbose (optional): A flag to log progress on each polling retry
            pbar_update_fn (optional): A function for updating progress bars while polling.
        Returns:
            status: The status of the job (a list when several IDs were given
                and ``loop_until_complete`` is False)
        Raises:
            MaxRetriesReachedError: If ``loop_until_complete`` is set and the
                jobs did not all complete within ``self.max_retries`` polls.
            requests.exceptions.HTTPError: If a status request is rejected.
        """
        if not isinstance(job_ids, list):
            job_ids = [job_ids]

        def _poll_job_status(job_id):
            # Returns (status_string, raw_response) for one job.
            response = session.get(
                API_URL + f"/job/{job_id}/status/",
                headers={
                    "Authorization": self.auth_token,
                    "Content-Type": "application/json",
                },
                timeout=200,
            )

            if response.status_code == HTTPStatus.OK:
                return response.json()["status"], response
            else:
                raise requests.exceptions.HTTPError(
                    f"{response.status_code}: {response.reason}"
                )

        if loop_until_complete:
            retries = 0
            completed = False
            while True:
                responses = []
                statuses = []

                for job_id in job_ids:
                    job_status, response = _poll_job_status(job_id)
                    statuses.append(job_status)
                    responses.append(response)

                if all(status == JobStatus.COMPLETED.value for status in statuses):
                    responses = [response.json() for response in responses]
                    completed = True
                    break

                if retries >= self.max_retries:
                    break

                retries += 1

                time.sleep(self.polling_interval)

                if verbose:
                    if pbar_update_fn:
                        pbar_update_fn(retries)
                    else:
                        # Plain, lazily-formatted log record (the previous
                        # message embedded literal "\c"/"\r" control residue).
                        logger.info(
                            "Polling %d / %d retries", retries, self.max_retries
                        )

            if completed and on_complete:
                on_complete(responses)
                return JobStatus.COMPLETED
            elif completed:
                return JobStatus.COMPLETED
            else:
                raise MaxRetriesReachedError(retries)
        else:
            statuses = [_poll_job_status(job_id)[0] for job_id in job_ids]
            return statuses if len(statuses) > 1 else statuses[0]
|
divi/qprog/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# isort: skip_file
|
|
6
|
+
from .quantum_program import QuantumProgram
|
|
7
|
+
from .batch import ProgramBatch
|
|
8
|
+
from ._qaoa import QAOA, GraphProblem
|
|
9
|
+
from ._vqe import VQE, VQEAnsatz
|
|
10
|
+
from ._mlae import MLAE
|
|
11
|
+
from ._graph_partitioning import GraphPartitioningQAOA, PartitioningConfig
|
|
12
|
+
from ._vqe_sweep import VQEHyperparameterSweep
|
|
13
|
+
from .optimizers import Optimizer
|