qoro-divi 0.2.2b1__py3-none-any.whl → 0.3.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of qoro-divi might be problematic.
- divi/_pbar.py +1 -3
- divi/circuits.py +3 -3
- divi/exp/cirq/__init__.py +1 -0
- divi/exp/cirq/_validator.py +645 -0
- divi/parallel_simulator.py +9 -9
- divi/qasm.py +2 -3
- divi/qoro_service.py +210 -141
- divi/qprog/__init__.py +2 -2
- divi/qprog/_graph_partitioning.py +103 -66
- divi/qprog/_qaoa.py +33 -8
- divi/qprog/_qubo_partitioning.py +199 -0
- divi/qprog/_vqe.py +48 -39
- divi/qprog/_vqe_sweep.py +413 -46
- divi/qprog/batch.py +61 -14
- divi/qprog/quantum_program.py +10 -11
- divi/qpu_system.py +20 -0
- qoro_divi-0.3.0b1.dist-info/LICENSES/.license-header +3 -0
- {qoro_divi-0.2.2b1.dist-info → qoro_divi-0.3.0b1.dist-info}/METADATA +5 -2
- {qoro_divi-0.2.2b1.dist-info → qoro_divi-0.3.0b1.dist-info}/RECORD +22 -19
- divi/qprog/_mlae.py +0 -182
- {qoro_divi-0.2.2b1.dist-info → qoro_divi-0.3.0b1.dist-info}/LICENSE +0 -0
- {qoro_divi-0.2.2b1.dist-info → qoro_divi-0.3.0b1.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {qoro_divi-0.2.2b1.dist-info → qoro_divi-0.3.0b1.dist-info}/WHEEL +0 -0
divi/parallel_simulator.py
CHANGED
@@ -7,7 +7,7 @@ import heapq
 import logging
 from functools import partial
 from multiprocessing import Pool
-from typing import Literal
+from typing import Literal
 from warnings import warn
 
 import qiskit_ibm_runtime.fake_provider as fk_prov
@@ -63,9 +63,9 @@ class ParallelSimulator(CircuitRunner):
         self,
         n_processes: int = 2,
         shots: int = 5000,
-        simulation_seed:
+        simulation_seed: int | None = None,
         qiskit_backend: Backend | Literal["auto"] | None = None,
-        noise_model:
+        noise_model: NoiseModel | None = None,
     ):
         """
         A multi-process wrapper around Qiskit's AerSimulator.
@@ -73,7 +73,7 @@ class ParallelSimulator(CircuitRunner):
         Args:
             n_processes (int, optional): Number of parallel processes to use for simulation. Defaults to 2.
             shots (int, optional): Number of shots to perform. Defaults to 5000.
-            simulation_seed (
+            simulation_seed (int, optional): Seed for the random number generator to ensure reproducibility. Defaults to None.
             backend (Backend or "auto, optional): A Qiskit backend to initiate the simulator from. If "auto" is passed,
                 the best-fit most recent fake backend will be chosen for the given circuit. Defaults to None, resulting in noiseless simulation.
             noise_model (NoiseModel, optional): Qiskit noise model to use in simulation. Defaults to None.
@@ -96,9 +96,9 @@ class ParallelSimulator(CircuitRunner):
     def simulate_circuit(
         circuit_data: tuple[str, str],
         shots: int,
-        simulation_seed:
-        qiskit_backend:
-        noise_model:
+        simulation_seed: int | None = None,
+        qiskit_backend: Backend | None = None,
+        noise_model: NoiseModel | None = None,
     ):
         circuit_label, circuit = circuit_data
 
@@ -207,8 +207,8 @@ class ParallelSimulator(CircuitRunner):
 
     @staticmethod
     def estimate_run_time_batch(
-        circuits:
-        precomputed_duration:
+        circuits: list[str] | None = None,
+        precomputed_duration: list[float] | None = None,
         n_qpus: int = 5,
         **transpilation_kwargs,
     ) -> float:
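
For orientation, a minimal construction sketch against the new ParallelSimulator signature. The import path is assumed from the file name divi/parallel_simulator.py and has not been verified against the released wheel:

    from divi.parallel_simulator import ParallelSimulator  # assumed import path

    # Mirrors the 0.3.0b1 signature shown above: simulation_seed and noise_model
    # are now annotated as "int | None" / "NoiseModel | None" with None defaults.
    sim = ParallelSimulator(
        n_processes=4,
        shots=2000,
        simulation_seed=1234,    # fixes the sampling seed for reproducibility
        qiskit_backend="auto",   # per the docstring, picks a best-fit fake backend; None keeps it noiseless
    )
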
divi/qasm.py
CHANGED
@@ -5,7 +5,6 @@
 import re
 from functools import partial
 from itertools import product
-from typing import Optional
 from warnings import warn
 
 import cirq
@@ -81,10 +80,10 @@ def to_openqasm(
     main_qscript,
     measurement_groups: list[list[qml.measurements.ExpectationMP]],
     measure_all: bool = True,
-    precision:
+    precision: int | None = None,
    return_measurements_separately: bool = False,
    symbols: list[Symbol] = None,
-    qem_protocol:
+    qem_protocol: QEMProtocol | None = None,
 ) -> list[str] | tuple[str, list[str]]:
     """
     Serialize the circuit as an OpenQASM 2.0 program.
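
The to_openqasm change drops the typing.Optional import and annotates precision and qem_protocol as "... | None". A hypothetical call sketch follows; the exact expectations on main_qscript and measurement_groups are inferred from the annotations only, and the divi.qasm import path is assumed from the file name:

    import pennylane as qml
    from divi.qasm import to_openqasm  # assumed import path

    ops = [qml.Hadamard(0), qml.CNOT(wires=[0, 1])]
    measurements = [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))]
    tape = qml.tape.QuantumScript(ops, measurements)

    qasm_programs = to_openqasm(
        tape,                               # main_qscript
        measurement_groups=[measurements],  # list[list[ExpectationMP]]
        precision=8,                        # int | None
        qem_protocol=None,                  # QEMProtocol | None
    )
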
divi/qoro_service.py
CHANGED
@@ -10,30 +10,42 @@ import time
 from collections.abc import Callable
 from enum import Enum
 from http import HTTPStatus
-from typing import Optional
 
 import requests
+from dotenv import dotenv_values
 from requests.adapters import HTTPAdapter, Retry
 
+from divi.exp.cirq import is_valid_qasm
 from divi.interfaces import CircuitRunner
+from divi.qpu_system import QPU, QPUSystem
 
 API_URL = "https://app.qoroquantum.net/api"
 MAX_PAYLOAD_SIZE_MB = 0.95
 
 session = requests.Session()
-
+retry_configuration = Retry(
     total=5,
     backoff_factor=0.1,
     status_forcelist=[502],
     allowed_methods=["GET", "POST", "DELETE"],
 )
 
-session.mount("http://", HTTPAdapter(max_retries=
-session.mount("https://", HTTPAdapter(max_retries=
+session.mount("http://", HTTPAdapter(max_retries=retry_configuration))
+session.mount("https://", HTTPAdapter(max_retries=retry_configuration))
 
 logger = logging.getLogger(__name__)
 
 
+def _raise_with_details(resp: requests.Response):
+    try:
+        data = resp.json()
+        body = json.dumps(data, ensure_ascii=False)
+    except ValueError:
+        body = resp.text
+    msg = f"{resp.status_code} {resp.reason}: {body}"
+    raise requests.HTTPError(msg)
+
+
 class JobStatus(Enum):
     PENDING = "PENDING"
     RUNNING = "RUNNING"
@@ -52,47 +64,155 @@ class JobType(Enum):
 class MaxRetriesReachedError(Exception):
     """Exception raised when the maximum number of retries is reached."""
 
-    def __init__(self, retries
+    def __init__(self, retries):
         self.retries = retries
-        self.message = f"
+        self.message = f"Maximum retries reached: {retries} retries attempted"
         super().__init__(self.message)
 
 
+def _parse_qpu_systems(json_data: list) -> list[QPUSystem]:
+    return [
+        QPUSystem(
+            name=system_data["name"],
+            qpus=[QPU(**qpu) for qpu in system_data.get("qpus", [])],
+            access_level=system_data["access_level"],
+        )
+        for system_data in json_data
+    ]
+
+
 class QoroService(CircuitRunner):
 
     def __init__(
         self,
-        auth_token: str,
+        auth_token: str | None = None,
         polling_interval: float = 3.0,
         max_retries: int = 5000,
         shots: int = 1000,
-
+        qpu_system_name: str | QPUSystem | None = None,
+        use_circuit_packing: bool = False,
     ):
         super().__init__(shots=shots)
 
+        if auth_token is None:
+            try:
+                auth_token = dotenv_values()["QORO_API_KEY"]
+            except KeyError:
+                raise ValueError("Qoro API key not provided nor found in a .env file.")
+
         self.auth_token = "Bearer " + auth_token
         self.polling_interval = polling_interval
         self.max_retries = max_retries
+        self._qpu_system_name = qpu_system_name
         self.use_circuit_packing = use_circuit_packing
 
-
-
-
-
-
+    @property
+    def qpu_system_name(self) -> str | QPUSystem | None:
+        return self._qpu_system_name
+
+    @qpu_system_name.setter
+    def qpu_system_name(self, system_name: str | QPUSystem | None):
+        """
+        Set the QPU system for the service.
+
+        Args:
+            system_name (str | QPUSystem): The QPU system to set or the name as a string.
+        """
+        if isinstance(system_name, str):
+            self._qpu_system_name = system_name
+        elif isinstance(system_name, QPUSystem):
+            self._qpu_system_name = system_name.name
+        elif system_name is None:
+            self._qpu_system_name = None
+        else:
+            raise TypeError("Expected a QPUSystem instance or str.")
+
+    def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
+        """A centralized helper for making API requests."""
+        url = f"{API_URL}/{endpoint}"
+
+        headers = {"Authorization": self.auth_token}
+
+        if method.upper() in ["POST", "PUT", "PATCH"]:
+            headers["Content-Type"] = "application/json"
 
-
+        # Allow overriding default headers
+        if "headers" in kwargs:
+            headers.update(kwargs.pop("headers"))
+
+        response = session.request(method, url, headers=headers, **kwargs)
+
+        # Generic error handling for non-OK statuses
+        if response.status_code >= 400:
             raise requests.exceptions.HTTPError(
-                f"
+                f"API Error: {response.status_code} {response.reason} for URL {response.url}"
             )
 
         return response
 
+    def test_connection(self):
+        """Test the connection to the Qoro API"""
+        return self._make_request("get", "", timeout=10)
+
+    def fetch_qpu_systems(self) -> list[QPUSystem]:
+        """
+        Get the list of available QPU systems from the Qoro API.
+
+        Returns:
+            List of QPUSystem objects.
+        """
+        response = self._make_request("get", "qpusystem/", timeout=10)
+        return _parse_qpu_systems(response.json())
+
+    @staticmethod
+    def _compress_data(value) -> bytes:
+        return base64.b64encode(gzip.compress(value.encode("utf-8"))).decode("utf-8")
+
+    def _split_circuits(self, circuits: dict[str, str]) -> list[dict[str, str]]:
+        """
+        Splits circuits into chunks by estimating payload size with a simplified,
+        consistent overhead calculation.
+        Assumes that BASE64 encoding produces ASCI characters, which are 1 byte each.
+        """
+        max_payload_bytes = MAX_PAYLOAD_SIZE_MB * 1024 * 1024
+        circuit_chunks = []
+        current_chunk = {}
+
+        # Start with size 2 for the opening and closing curly braces '{}'
+        current_chunk_size_bytes = 2
+
+        for key, value in circuits.items():
+            compressed_value = self._compress_data(value)
+
+            item_size_bytes = len(key) + len(compressed_value) + 6
+
+            # If adding this item would exceed the limit, finalize the current chunk.
+            # This check only runs if the chunk is not empty.
+            if current_chunk and (
+                current_chunk_size_bytes + item_size_bytes > max_payload_bytes
+            ):
+                circuit_chunks.append(current_chunk)
+
+                # Start a new chunk
+                current_chunk = {}
+                current_chunk_size_bytes = 2
+
+            # Add the new item to the current chunk and update its size
+            current_chunk[key] = compressed_value
+            current_chunk_size_bytes += item_size_bytes
+
+        # Add the last remaining chunk if it's not empty
+        if current_chunk:
+            circuit_chunks.append(current_chunk)
+
+        return circuit_chunks
+
     def submit_circuits(
         self,
         circuits: dict[str, str],
         tag: str = "default",
         job_type: JobType = JobType.SIMULATE,
+        qpu_system_name: str | None = None,
         override_circuit_packing: bool | None = None,
     ):
         """
@@ -120,77 +240,39 @@ class QoroService(CircuitRunner):
         if job_type == JobType.CIRCUIT_CUT and len(circuits) > 1:
             raise ValueError("Only one circuit allowed for circuit-cutting jobs.")
 
-
-
-                "
-            )
-
-        def _split_circuits(circuits: dict[str, str]) -> list[dict[str, str]]:
-            """
-            Split circuits into smaller chunks if the payload size exceeds the maximum allowed size.
-
-            Args:
-                circuits: Dictionary of circuits to be sent
-
-            Returns:
-                List of circuit chunks
-            """
-
-            def _estimate_size(data):
-                payload_json = json.dumps(data)
-                return len(payload_json.encode("utf-8")) / 1024 / 1024
-
-            circuit_chunks = []
-            current_chunk = {}
-            current_size = 0
-
-            for key, value in circuits.items():
-                compressed_value = _compress_data(value)
-                estimated_size = _estimate_size({key: compressed_value})
-
-                if current_size + estimated_size > MAX_PAYLOAD_SIZE_MB:
-                    circuit_chunks.append(current_chunk)
-                    current_chunk = {key: compressed_value}
-                    current_size = estimated_size
-                else:
-                    current_chunk[key] = compressed_value
-                    current_size += estimated_size
-
-            if current_chunk:
-                circuit_chunks.append(current_chunk)
+        for key, circuit in circuits.items():
+            if not is_valid_qasm(circuit):
+                raise ValueError(f"Circuit {key} is not a valid QASM string.")
 
-
+        circuit_chunks = self._split_circuits(circuits)
 
-
+        payload = {
+            "shots": self.shots,
+            "tag": tag,
+            "job_type": job_type.value,
+            "qpu_system_name": qpu_system_name or self.qpu_system_name,
+            "use_packing": (
+                override_circuit_packing
+                if override_circuit_packing is not None
+                else self.use_circuit_packing
+            ),
+        }
 
         job_ids = []
         for chunk in circuit_chunks:
-
-
-
-
-
-
-                json={
-                    "circuits": chunk,
-                    "shots": self.shots,
-                    "tag": tag,
-                    "job_type": job_type.value,
-                    "use_packing": (
-                        override_circuit_packing
-                        if override_circuit_packing is not None
-                        else self.use_circuit_packing
-                    ),
-                },
+            payload["circuits"] = chunk
+
+            response = self._make_request(
+                "post",
+                "job/",
+                json=payload,
                 timeout=100,
             )
 
             if response.status_code == HTTPStatus.CREATED:
                 job_ids.append(response.json()["job_id"])
             else:
-
-                    f"{response.status_code}: {response.reason}"
-                )
+                _raise_with_details(response)
 
         return job_ids if len(job_ids) > 1 else job_ids[0]
 
@@ -206,16 +288,14 @@ class QoroService(CircuitRunner):
         if not isinstance(job_ids, list):
             job_ids = [job_ids]
 
-        responses = [
-
-
-
-                API_URL + f"/job/{job_id}",
-                headers={"Authorization": self.auth_token},
+        responses = [
+            self._make_request(
+                "delete",
+                f"job/{job_id}",
                 timeout=50,
             )
-
-
+            for job_id in job_ids
+        ]
 
         return responses if len(responses) > 1 else responses[0]
 
@@ -231,14 +311,14 @@ class QoroService(CircuitRunner):
         if not isinstance(job_ids, list):
             job_ids = [job_ids]
 
-        responses = [
-
-
-
-                headers={"Authorization": self.auth_token},
+        responses = [
+            self._make_request(
+                "get",
+                f"job/{job_id}/results",
                 timeout=100,
            )
-
+            for job_id in job_ids
+        ]
 
         if all(response.status_code == HTTPStatus.OK for response in responses):
             responses = [response.json() for response in responses]
@@ -260,9 +340,9 @@ class QoroService(CircuitRunner):
         self,
         job_ids: str | list[str],
         loop_until_complete: bool = False,
-        on_complete:
+        on_complete: Callable | None = None,
         verbose: bool = True,
-        pbar_update_fn:
+        pbar_update_fn: Callable | None = None,
     ):
         """
         Get the status of a job and optionally execute function *on_complete* on the results
@@ -273,7 +353,6 @@ class QoroService(CircuitRunner):
             loop_until_complete (bool): A flag to loop until the job is completed
             on_complete (optional): A function to be called when the job is completed
             polling_interval (optional): The time to wait between retries
-            max_retries (optional): The maximum number of retries
             verbose (optional): A flag to print the when retrying
             pbar_update_fn (optional): A function for updating progress bars while polling.
         Returns:
@@ -282,62 +361,52 @@ class QoroService(CircuitRunner):
         if not isinstance(job_ids, list):
             job_ids = [job_ids]
 
-
-
-
-
-        "
-
-
-
-
+        if not loop_until_complete:
+            statuses = [
+                self._make_request(
+                    "get",
+                    f"job/{job_id}/status/",
+                    timeout=200,
+                ).json()["status"]
+                for job_id in job_ids
+            ]
+            return statuses if len(statuses) > 1 else statuses[0]
 
-
-
-
-
-
+        pending_job_ids = set(job_ids)
+        responses = []
+        for retry_count in range(1, self.max_retries + 1):
+            # Exit early if all jobs are done
+            if not pending_job_ids:
+                break
+
+            for job_id in list(pending_job_ids):
+                response = self._make_request(
+                    "get",
+                    f"job/{job_id}/status/",
+                    timeout=200,
                 )
 
-
-
-            completed = False
-            while True:
-                responses = []
-                statuses = []
-
-                for job_id in job_ids:
-                    job_status, response = _poll_job_status(job_id)
-                    statuses.append(job_status)
+                if response.json()["status"] == JobStatus.COMPLETED.value:
+                    pending_job_ids.remove(job_id)
                     responses.append(response)
 
-
-
-
-                    break
-
-                if retries >= self.max_retries:
-                    break
-
-                retries += 1
+            # Exit before sleeping if no jobs are pending
+            if not pending_job_ids:
+                break
 
-
+            time.sleep(self.polling_interval)
 
-
-
-
-
-
-
-
+            if verbose:
+                if pbar_update_fn:
+                    pbar_update_fn(retry_count)
+                else:
+                    logger.info(
+                        rf"\cPolling {retry_count} / {self.max_retries} retries\r"
+                    )
 
-
+        if not pending_job_ids:
+            if on_complete:
                 on_complete(responses)
-
-            elif completed:
-                return JobStatus.COMPLETED
-            else:
-                raise MaxRetriesReachedError(retries)
+            return JobStatus.COMPLETED
         else:
-
-            return statuses if len(statuses) > 1 else statuses[0]
+            raise MaxRetriesReachedError(retry_count)
divi/qprog/__init__.py
CHANGED
@@ -7,7 +7,7 @@ from .quantum_program import QuantumProgram
 from .batch import ProgramBatch
 from ._qaoa import QAOA, GraphProblem
 from ._vqe import VQE, VQEAnsatz
-from ._mlae import MLAE
 from ._graph_partitioning import GraphPartitioningQAOA, PartitioningConfig
-from .
+from ._qubo_partitioning import QUBOPartitioningQAOA
+from ._vqe_sweep import VQEHyperparameterSweep, MoleculeTransformer
 from .optimizers import Optimizer
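
With the updated divi.qprog exports, downstream imports would look roughly as follows; the names are copied from the __init__ diff above, and MLAE is no longer re-exported since _mlae.py was removed:

    from divi.qprog import (
        QAOA,
        VQE,
        GraphPartitioningQAOA,
        PartitioningConfig,
        QUBOPartitioningQAOA,       # new in 0.3.0b1
        VQEHyperparameterSweep,     # new in 0.3.0b1
        MoleculeTransformer,        # new in 0.3.0b1
    )
    # from divi.qprog import MLAE  # removed in 0.3.0b1
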
|