qoro-divi 0.2.0b1__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. divi/__init__.py +1 -2
  2. divi/backends/__init__.py +10 -0
  3. divi/backends/_backend_properties_conversion.py +227 -0
  4. divi/backends/_circuit_runner.py +70 -0
  5. divi/backends/_execution_result.py +70 -0
  6. divi/backends/_parallel_simulator.py +486 -0
  7. divi/backends/_qoro_service.py +663 -0
  8. divi/backends/_qpu_system.py +101 -0
  9. divi/backends/_results_processing.py +133 -0
  10. divi/circuits/__init__.py +13 -0
  11. divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
  12. divi/circuits/_cirq/_parser.py +110 -0
  13. divi/circuits/_cirq/_qasm_export.py +78 -0
  14. divi/circuits/_core.py +391 -0
  15. divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
  16. divi/circuits/_qasm_validation.py +694 -0
  17. divi/qprog/__init__.py +27 -8
  18. divi/qprog/_expectation.py +181 -0
  19. divi/qprog/_hamiltonians.py +281 -0
  20. divi/qprog/algorithms/__init__.py +16 -0
  21. divi/qprog/algorithms/_ansatze.py +368 -0
  22. divi/qprog/algorithms/_custom_vqa.py +263 -0
  23. divi/qprog/algorithms/_pce.py +262 -0
  24. divi/qprog/algorithms/_qaoa.py +579 -0
  25. divi/qprog/algorithms/_vqe.py +262 -0
  26. divi/qprog/batch.py +387 -74
  27. divi/qprog/checkpointing.py +556 -0
  28. divi/qprog/exceptions.py +9 -0
  29. divi/qprog/optimizers.py +1014 -43
  30. divi/qprog/quantum_program.py +243 -412
  31. divi/qprog/typing.py +62 -0
  32. divi/qprog/variational_quantum_algorithm.py +1208 -0
  33. divi/qprog/workflows/__init__.py +10 -0
  34. divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
  35. divi/qprog/workflows/_qubo_partitioning.py +221 -0
  36. divi/qprog/workflows/_vqe_sweep.py +560 -0
  37. divi/reporting/__init__.py +7 -0
  38. divi/reporting/_pbar.py +127 -0
  39. divi/reporting/_qlogger.py +68 -0
  40. divi/reporting/_reporter.py +155 -0
  41. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/METADATA +43 -15
  42. qoro_divi-0.6.0.dist-info/RECORD +47 -0
  43. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/WHEEL +1 -1
  44. qoro_divi-0.6.0.dist-info/licenses/LICENSES/.license-header +3 -0
  45. divi/_pbar.py +0 -73
  46. divi/circuits.py +0 -139
  47. divi/exp/cirq/_lexer.py +0 -126
  48. divi/exp/cirq/_parser.py +0 -889
  49. divi/exp/cirq/_qasm_export.py +0 -37
  50. divi/exp/cirq/_qasm_import.py +0 -35
  51. divi/exp/cirq/exception.py +0 -21
  52. divi/exp/scipy/_cobyla.py +0 -342
  53. divi/exp/scipy/pyprima/LICENCE.txt +0 -28
  54. divi/exp/scipy/pyprima/__init__.py +0 -263
  55. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  56. divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
  57. divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
  58. divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
  59. divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
  60. divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
  61. divi/exp/scipy/pyprima/cobyla/update.py +0 -331
  62. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  63. divi/exp/scipy/pyprima/common/_bounds.py +0 -41
  64. divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
  65. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
  66. divi/exp/scipy/pyprima/common/_project.py +0 -224
  67. divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
  68. divi/exp/scipy/pyprima/common/consts.py +0 -48
  69. divi/exp/scipy/pyprima/common/evaluate.py +0 -101
  70. divi/exp/scipy/pyprima/common/history.py +0 -39
  71. divi/exp/scipy/pyprima/common/infos.py +0 -30
  72. divi/exp/scipy/pyprima/common/linalg.py +0 -452
  73. divi/exp/scipy/pyprima/common/message.py +0 -336
  74. divi/exp/scipy/pyprima/common/powalg.py +0 -131
  75. divi/exp/scipy/pyprima/common/preproc.py +0 -393
  76. divi/exp/scipy/pyprima/common/present.py +0 -5
  77. divi/exp/scipy/pyprima/common/ratio.py +0 -56
  78. divi/exp/scipy/pyprima/common/redrho.py +0 -49
  79. divi/exp/scipy/pyprima/common/selectx.py +0 -346
  80. divi/interfaces.py +0 -25
  81. divi/parallel_simulator.py +0 -258
  82. divi/qlogger.py +0 -119
  83. divi/qoro_service.py +0 -343
  84. divi/qprog/_mlae.py +0 -182
  85. divi/qprog/_qaoa.py +0 -440
  86. divi/qprog/_vqe.py +0 -275
  87. divi/qprog/_vqe_sweep.py +0 -144
  88. divi/utils.py +0 -116
  89. qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
  90. /divi/{qem.py → circuits/qem.py} +0 -0
  91. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSE +0 -0
  92. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
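The manifest shows the flat `divi/*` modules reorganized into `divi.backends`, `divi.circuits`, `divi.qprog` (with `algorithms` and `workflows` subpackages), and `divi.reporting`, while the vendored `divi/exp/scipy/pyprima` tree is removed. As a rough orientation, a sketch of the new import map follows; the names shown are confirmed by the new imports in the diff below, but which of them each subpackage re-exports publicly is an assumption inferred from the file list.

# Hypothetical 0.6.0 import map, inferred from the file list above.
# Only the names themselves are confirmed by the diff below; the exact
# public re-exports of each subpackage are assumptions.
from divi.backends import CircuitRunner, JobStatus  # formerly divi.interfaces / divi.qoro_service
from divi.backends._execution_result import ExecutionResult
from divi.circuits import CircuitBundle  # replaces divi.circuits.Circuit / MetaCircuit
from divi.reporting import LoggingProgressReporter, QueueProgressReporter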
@@ -1,493 +1,324 @@
-# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+# SPDX-FileCopyrightText: 2025-2026 Qoro Quantum Ltd <divi@qoroquantum.de>
 #
 # SPDX-License-Identifier: Apache-2.0
 
-import logging
-import pickle
 from abc import ABC, abstractmethod
-from functools import partial
+from http import HTTPStatus
 from queue import Queue
-from typing import Optional
+from threading import Event
+from typing import Any
+from warnings import warn
 
-import numpy as np
-from qiskit.result import marginal_counts, sampled_expectation_value
-from scipy.optimize import OptimizeResult, minimize
+import requests
 
-from divi import QoroService
-from divi.circuits import Circuit, MetaCircuit
-from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
-from divi.interfaces import CircuitRunner
-from divi.qem import _NoMitigation
-from divi.qoro_service import JobStatus
-from divi.qprog.optimizers import Optimizer
-
-logger = logging.getLogger(__name__)
+from divi.backends import CircuitRunner, JobStatus
+from divi.backends._execution_result import ExecutionResult
+from divi.circuits import CircuitBundle
+from divi.qprog.exceptions import _CancelledError
+from divi.reporting import LoggingProgressReporter, QueueProgressReporter
 
 
 class QuantumProgram(ABC):
+    """Abstract base class for quantum programs.
+
+    This class defines the interface and provides common functionality for quantum algorithms.
+    It handles circuit execution, result processing, and data persistence.
+
+    Subclasses must implement:
+    - run(): Execute the quantum algorithm
+    - _generate_circuits(): Generate quantum circuits for execution
+    - _post_process_results(): Process execution results
+
+    Attributes:
+        backend (CircuitRunner): The quantum circuit execution backend.
+        _seed (int | None): Random seed for reproducible results.
+        _progress_queue (Queue | None): Queue for progress reporting.
+        _circuits (list): List of circuits to be executed.
+    """
+
     def __init__(
         self,
         backend: CircuitRunner,
-        seed: Optional[int] = None,
-        progress_queue: Optional[Queue] = None,
+        seed: int | None = None,
+        progress_queue: Queue | None = None,
         **kwargs,
     ):
-        """
-        Initializes the QuantumProgram class.
-
-        If a child class represents a hybrid quantum-classical algorithm,
-        the instance variables `n_layers` and `n_params` must be set, where:
-        - `n_layers` is the number of layers in the quantum circuit.
-        - `n_params` is the number of parameters per layer.
-
-        For exotic algorithms where these variables may not be applicable,
-        the `_initialize_params` method should be overridden to set the parameters.
+        """Initialize the QuantumProgram.
 
         Args:
-            backend (CircuitRunner): An instance of a CircuitRunner object, which
-                can either be ParallelSimulator or QoroService.
-            seed (int): A seed for numpy's random number generator, which will
-                be used for the parameter initialization.
-                Defaults to None.
-            progress_queue (Queue): a queue for progress bar updates.
-
-            **kwargs: Additional keyword arguments that influence behaviour.
-                - grouping_strategy (Optional[Any]): A strategy for grouping operations, used in Pennylane's transforms.
-                  Defaults to None.
-                - qem_protocol (Optional[QEMProtocol]): the quantum error mitigation protocol to apply.
-                  Must be of type QEMProtocol. Defaults to None.
-
-            The following key values are reserved for internal use and should not be set by the user:
-                - losses (Optional[list]): A list to initialize the `losses` attribute. Defaults to an empty list.
-                - final_params (Optional[list]): A list to initialize the `final_params` attribute. Defaults to an empty list.
-
+            backend (CircuitRunner): Quantum circuit execution backend.
+            seed (int | None): Random seed for reproducible results. Defaults to None.
+            progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+            **kwargs: Additional keyword arguments for subclasses.
+                program_id (str | None): Program identifier for progress reporting in batch
+                    operations. If provided along with progress_queue, enables queue-based
+                    progress reporting.
         """
-
-        # Shared Variables
-        self.losses = kwargs.pop("losses", [])
-        self.final_params = kwargs.pop("final_params", [])
-
-        self.circuits: list[Circuit] = []
-
+        self.backend = backend
+        self._seed = seed
+        self._progress_queue = progress_queue
        self._total_circuit_count = 0
        self._total_run_time = 0.0
-        self._curr_params = []
-
-        self._seed = seed
-        self._rng = np.random.default_rng(self._seed)
+        self._curr_circuits = []
+        self._current_execution_result = None
 
-        # Lets child classes adapt their optimization
-        # step for grad calculation routine
-        self._grad_mode = False
-
-        self.backend = backend
-        self.job_id = kwargs.get("job_id", None)
-
-        self._progress_queue = progress_queue
+        # --- Progress Reporting ---
+        self.program_id = kwargs.get("program_id", None)
+        if progress_queue and self.program_id is not None:
+            self.reporter = QueueProgressReporter(self.program_id, progress_queue)
+        else:
+            self.reporter = LoggingProgressReporter()
 
-        # Needed for Pennylane's transforms
-        self._grouping_strategy = kwargs.pop("grouping_strategy", None)
+    @abstractmethod
+    def run(self, **kwargs) -> tuple[int, float]:
+        """Execute the quantum algorithm.
 
-        self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
+        Args:
+            **kwargs: Additional keyword arguments for subclasses.
 
-        self._meta_circuit_factory = partial(
-            MetaCircuit,
-            grouping_strategy=self._grouping_strategy,
-            qem_protocol=self._qem_protocol,
-        )
+        Returns:
+            tuple[int, float]: A tuple containing:
+                - int: Total number of circuits executed
+                - float: Total runtime in seconds
+        """
+        pass
 
-    @property
-    def total_circuit_count(self):
-        return self._total_circuit_count
+    @abstractmethod
+    def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
+        """Generate quantum circuits for execution.
 
-    @property
-    def total_run_time(self):
-        return self._total_run_time
+        This method should generate and return a list of CircuitBundle objects based on
+        the current algorithm state. The circuits will be executed by the backend.
 
-    @property
-    def meta_circuits(self):
-        return self._meta_circuits
+        Args:
+            **kwargs: Additional keyword arguments for circuit generation.
 
-    @abstractmethod
-    def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
+        Returns:
+            list[CircuitBundle]: List of CircuitBundle objects to be executed.
+        """
         pass
 
     @abstractmethod
-    def _generate_circuits(self, **kwargs):
-        pass
-
-    def _initialize_params(self):
-        self._curr_params = np.array(
-            [
-                self._rng.uniform(0, 2 * np.pi, self.n_layers * self.n_params)
-                for _ in range(self.optimizer.n_param_sets)
-            ]
-        )
+    def _post_process_results(self, results: dict, **kwargs) -> Any:
+        """Process execution results.
 
-    def _run_optimization_circuits(self, store_data, data_file):
-        self.circuits[:] = []
+        Args:
+            results (dict): Raw results from circuit execution.
 
-        self._generate_circuits()
+        Returns:
+            Any: Processed results specific to the algorithm.
+        """
+        pass
 
-        losses = self._dispatch_circuits_and_process_results(
-            store_data=store_data, data_file=data_file
-        )
+    def _set_cancellation_event(self, event: Event):
+        """Set a cancellation event for graceful program termination.
 
-        return losses
+        This method is called by batch runners to provide a mechanism
+        for stopping the optimization loop cleanly when requested.
 
-    def _update_mc_params(self):
-        """
-        Updates the parameters based on previous MC iteration.
+        Args:
+            event (Event): Threading Event object that signals cancellation when set.
         """
+        self._cancellation_event = event
 
-        if self.current_iteration == 0:
-            self._initialize_params()
+    @property
+    def total_circuit_count(self) -> int:
+        """Get the total number of circuits executed.
 
-            self.current_iteration += 1
+        Returns:
+            int: Cumulative count of circuits submitted for execution.
+        """
+        return self._total_circuit_count
 
-            return
+    @property
+    def total_run_time(self) -> float:
+        """Get the total runtime across all circuit executions.
 
-        self._curr_params = self.optimizer.compute_new_parameters(
-            self._curr_params,
-            self.current_iteration,
-            losses=self.losses[-1],
-            rng=self._rng,
-        )
+        Returns:
+            float: Cumulative execution time in seconds.
+        """
+        return self._total_run_time
 
-        self.current_iteration += 1
+    def _prepare_and_send_circuits(self, **kwargs) -> ExecutionResult:
+        """Prepare circuits for execution and submit them to the backend.
 
-    def _prepare_and_send_circuits(self):
+        Returns:
+            ExecutionResult: Result from circuit submission. For async backends,
+                contains job_id. For sync backends, contains results directly.
+        """
         job_circuits = {}
+        self._reset_tag_cache()
 
-        for circuit in self.circuits:
-            for tag, qasm_circuit in zip(circuit.tags, circuit.qasm_circuits):
-                job_circuits[tag] = qasm_circuit
+        for bundle in self._curr_circuits:
+            for executable in bundle.executables:
+                job_circuits[self._encode_tag(executable.tag)] = executable.qasm
 
         self._total_circuit_count += len(job_circuits)
 
-        backend_output = self.backend.submit_circuits(job_circuits)
-
-        if isinstance(self.backend, QoroService):
-            self._curr_service_job_id = backend_output
+        execution_result = self.backend.submit_circuits(job_circuits, **kwargs)
 
-        return backend_output
+        return execution_result
 
-    def _dispatch_circuits_and_process_results(self, store_data=False, data_file=None):
-        """
-        Run an iteration of the program. The outputs are stored in the Program object.
-        Optionally, the data can be stored in a file.
+    def _track_runtime(self, response):
+        """Extract and track runtime from a backend response.
 
         Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
+            response: Backend response containing runtime information.
+                Can be a dict or a list of responses.
         """
+        if isinstance(response, dict):
+            self._total_run_time += float(response["run_time"])
+        elif isinstance(response, list):
+            self._total_run_time += sum(float(r.json()["run_time"]) for r in response)
 
-        results = self._prepare_and_send_circuits()
-
-        def add_run_time(response):
-            if isinstance(response, dict):
-                self._total_run_time += float(response["run_time"])
-            elif isinstance(response, list):
-                self._total_run_time += sum(float(r["run_time"]) for r in response)
-
-        if isinstance(self.backend, QoroService):
-            status = self.backend.poll_job_status(
-                self._curr_service_job_id,
-                loop_until_complete=True,
-                on_complete=add_run_time,
-                **(
-                    {
-                        "pbar_update_fn": lambda n_polls: self._progress_queue.put(
-                            {
-                                "job_id": self.job_id,
-                                "progress": 0,
-                                "poll_attempt": n_polls,
-                            }
-                        )
-                    }
-                    if self._progress_queue is not None
-                    else {}
-                ),
-            )
-
-            if status != JobStatus.COMPLETED:
-                raise Exception(
-                    "Job has not completed yet, cannot post-process results"
-                )
-
-            results = self.backend.get_job_results(self._curr_service_job_id)
-
-        results = {r["label"]: r["results"] for r in results}
-
-        result = self._post_process_results(results)
-
-        if store_data:
-            self.save_iteration(data_file)
-
-        return result
-
-    def _post_process_results(
-        self, results: dict[str, dict[str, int]]
-    ) -> dict[int, float]:
-        """
-        Post-process the results of the quantum problem.
+    def _wait_for_qoro_job_completion(
+        self, execution_result: ExecutionResult
+    ) -> list[dict]:
+        """Wait for a QoroService job to complete and return results.
 
         Args:
-            results (dict): The shot histograms of the quantum execution step.
-                The keys should be strings of format {param_id}_*_{measurement_group_id}.
-                i.e. An underscore-separated bunch of metadata, starting always with
-                the index of some parameter and ending with the index of some measurement group.
-                Any extra piece of metadata that might be relevant to the specific application can
-                be kept in the middle.
+            execution_result: The ExecutionResult from circuit submission.
 
         Returns:
-            (dict) The energies for each parameter set grouping, where the dict keys
-            correspond to the parameter indices.
-        """
-
-        losses = {}
-        measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups
-
-        for p in range(self._curr_params.shape[0]):
-            # Extract relevant entries from the execution results dict
-            param_results = {k: v for k, v in results.items() if k.startswith(f"{p}_")}
-
-            # Compute the marginal results for each observable
-            marginal_results = []
-            for group_idx, curr_measurement_group in enumerate(measurement_groups):
-                group_results = {
-                    k: v
-                    for k, v in param_results.items()
-                    if k.endswith(f"_{group_idx}")
-                }
-
-                curr_marginal_results = []
-                for observable in curr_measurement_group:
-                    intermediate_exp_values = [
-                        sampled_expectation_value(
-                            marginal_counts(shots_dict, observable.wires.tolist()),
-                            "Z" * len(observable.wires),
-                        )
-                        for shots_dict in group_results.values()
-                    ]
-
-                    mitigated_exp_value = self._qem_protocol.postprocess_results(
-                        intermediate_exp_values
-                    )
-
-                    curr_marginal_results.append(mitigated_exp_value)
-
-                marginal_results.append(
-                    curr_marginal_results
-                    if len(curr_marginal_results) > 1
-                    else curr_marginal_results[0]
-                )
-
-            pl_loss = (
-                self._meta_circuits["cost_circuit"]
-                .postprocessing_fn(marginal_results)[0]
-                .item()
-            )
-
-            losses[p] = pl_loss + self.loss_constant
-
-        return losses
+            list[dict]: The job results from the backend.
 
-    def run(self, store_data=False, data_file=None):
+        Raises:
+            Exception: If job fails or doesn't complete.
         """
-        Run the QAOA problem. The outputs are stored in the QAOA object. Optionally, the data can be stored in a file.
-
-        Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
-        """
-
-        if self._progress_queue is not None:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "message": "Finished Setup",
-                    "progress": 0,
-                }
+        job_id = execution_result.job_id
+        if job_id is None:
+            raise ValueError("ExecutionResult must have a job_id for async completion")
+
+        # Build the poll callback if reporter is available
+        if hasattr(self, "reporter"):
+            update_function = lambda n_polls, status: self.reporter.info(
+                message="",
+                poll_attempt=n_polls,
+                max_retries=self.backend.max_retries,
+                service_job_id=job_id,
+                job_status=status,
             )
         else:
-            logger.info("Finished Setup")
-
-        if self.optimizer == Optimizer.MONTE_CARLO:
-            while self.current_iteration < self.max_iterations:
-
-                self._update_mc_params()
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": f"⛰️ Sampling from Loss Lansdscape ⛰️",
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration} circuits\r"
-                    )
-
-                curr_losses = self._run_optimization_circuits(store_data, data_file)
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "progress": 1,
-                        }
-                    )
-                else:
-                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
-
-                self.losses.append(curr_losses)
-
-            self.final_params[:] = np.atleast_2d(self._curr_params)
-
-        elif self.optimizer in (
-            Optimizer.NELDER_MEAD,
-            Optimizer.L_BFGS_B,
-            Optimizer.COBYLA,
-        ):
-
-            def cost_fn(params):
-                task_name = "💸 Computing Cost 💸"
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
-                    )
-
-                self._curr_params = np.atleast_2d(params)
-
-                losses = self._run_optimization_circuits(store_data, data_file)
+            update_function = None
+
+        # Poll until complete
+        status = self.backend.poll_job_status(
+            execution_result,
+            loop_until_complete=True,
+            on_complete=self._track_runtime,
+            verbose=False,  # Disable the default logger in QoroService
+            progress_callback=update_function,
+        )
 
-                return losses[0]
+        if status == JobStatus.FAILED:
+            raise RuntimeError(f"Job {job_id} has failed")
+
+        if status == JobStatus.CANCELLED:
+            # If cancellation was requested (e.g., by ProgramBatch), raise _CancelledError
+            # so it's handled gracefully. Otherwise, raise RuntimeError for unexpected cancellation.
+            if (
+                hasattr(self, "_cancellation_event")
+                and self._cancellation_event
+                and self._cancellation_event.is_set()
+            ):
+                raise _CancelledError(f"Job {job_id} was cancelled")
+            raise RuntimeError(f"Job {job_id} was cancelled")
+
+        if status != JobStatus.COMPLETED:
+            raise Exception("Job has not completed yet, cannot post-process results")
+        completed_result = self.backend.get_job_results(execution_result)
+        return completed_result.results
+
+    def cancel_unfinished_job(self):
+        """Cancel the currently running cloud job if one exists.
+
+        This method attempts to cancel the job associated with the current
+        ExecutionResult. It is best-effort and will log warnings for any errors
+        (e.g., job already completed, permission denied) without raising exceptions.
+
+        This is typically called by ProgramBatch when handling cancellation
+        to ensure cloud jobs are cancelled before local threads terminate.
+        """
 
-            def grad_fn(params):
-                self._grad_mode = True
+        if self._current_execution_result is None:
+            warn("Cannot cancel job: no current execution result", stacklevel=2)
+            return
 
-                task_name = "📈 Computing Gradients 📈"
+        if self._current_execution_result.job_id is None:
+            warn("Cannot cancel job: execution result has no job_id", stacklevel=2)
+            return
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
+        try:
+            self.backend.cancel_job(self._current_execution_result)
+        except requests.exceptions.HTTPError as e:
+            # Check if this is an expected error (job already completed/failed/cancelled)
+            if (
+                hasattr(e, "response")
+                and e.response is not None
+                and e.response.status_code == HTTPStatus.CONFLICT
+            ):
+                # 409 Conflict means job is already in a terminal state - this is expected
+                # in race conditions where job completes before we can cancel it.
+                if hasattr(self, "reporter"):
+                    self.reporter.info(
+                        f"Job {self._current_execution_result.job_id} already completed or cancelled"
                     )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
+            else:
+                # Unexpected error (403 Forbidden, 404 Not Found, etc.) - report it
+                if hasattr(self, "reporter"):
+                    self.reporter.info(
+                        f"Failed to cancel job {self._current_execution_result.job_id}: {e}"
                     )
+        except Exception as e:
+            # Other unexpected errors - report them
+            if hasattr(self, "reporter"):
+                self.reporter.info(
+                    f"Failed to cancel job {self._current_execution_result.job_id}: {e}"
+                )
 
-                shift_mask = self.optimizer.compute_parameter_shift_mask(len(params))
-
-                self._curr_params = shift_mask + params
-
-                exp_vals = self._run_optimization_circuits(store_data, data_file)
-
-                grads = np.zeros_like(params)
-                for i in range(len(params)):
-                    grads[i] = 0.5 * (exp_vals[2 * i] - exp_vals[2 * i + 1])
-
-                self._grad_mode = False
+    def _dispatch_circuits_and_process_results(self, **kwargs):
+        """Run an iteration of the program.
 
-                return grads
+        The outputs are stored in the Program object.
 
-            def _iteration_counter(intermediate_result: OptimizeResult):
-                self.losses.append({0: intermediate_result.fun})
+        Args:
+            **kwargs: Additional keyword arguments for circuit submission and result processing.
 
-                self.final_params[:] = np.atleast_2d(intermediate_result.x)
+        Returns:
+            Any: Processed results from _post_process_results.
+        """
+        execution_result = self._prepare_and_send_circuits(**kwargs)
 
-                self.current_iteration += 1
+        # Store the execution result for potential cancellation
+        self._current_execution_result = execution_result
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "progress": 1,
-                        }
-                    )
-                else:
-                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
-
-                if (
-                    self.optimizer == Optimizer.COBYLA
-                    and intermediate_result.nit + 1 == self.max_iterations
-                ):
-                    raise StopIteration
-
-            if self.max_iterations is None or self.optimizer == Optimizer.COBYLA:
-                # COBYLA perceive maxiter as maxfev so we need
-                # to use the callback fn for counting instead.
-                maxiter = None
+        try:
+            # For async backends, poll for results
+            if execution_result.job_id is not None:
+                results = self._wait_for_qoro_job_completion(execution_result)
             else:
-                # Need to add one more iteration for Nelder-Mead's simplex initialization step
-                maxiter = (
-                    self.max_iterations + 1
-                    if self.optimizer == Optimizer.NELDER_MEAD
-                    else self.max_iterations
-                )
-
-            self._initialize_params()
-            self._minimize_res = minimize(
-                fun=cost_fn,
-                x0=self._curr_params[0],
-                method=(
-                    cobyla_fn
-                    if self.optimizer == Optimizer.COBYLA
-                    else self.optimizer.value
-                ),
-                jac=grad_fn if self.optimizer == Optimizer.L_BFGS_B else None,
-                callback=_iteration_counter,
-                options={"maxiter": maxiter},
-            )
-
-        if self._progress_queue:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "progress": 0,
-                    "final_status": "Success",
-                }
-            )
-        else:
-            logger.info(f"Finished Optimization!")
-
-        return self._total_circuit_count, self._total_run_time
+                # For sync backends, results are already available
+                results = execution_result.results
+                if results is None:
+                    raise ValueError("ExecutionResult has neither results nor job_id")
 
-    def save_iteration(self, data_file):
-        """
-        Save the current iteration of the program to a file.
+            results = {r["label"]: r["results"] for r in results}
+            results = self._decode_tags(results)
 
-        Args:
-            data_file (str): The file to save the iteration to.
-        """
+            result = self._post_process_results(results, **kwargs)
 
-        with open(data_file, "wb") as f:
-            pickle.dump(self, f)
+            return result
+        finally:
+            # Clear the execution result after processing
+            self._current_execution_result = None
 
-    @staticmethod
-    def import_iteration(data_file):
-        """
-        Import an iteration of the program from a file.
+    def _reset_tag_cache(self) -> None:
+        """Hook to reset per-run tag caches. Default is no-op."""
 
-        Args:
-            data_file (str): The file to import the iteration from.
-        """
+    def _encode_tag(self, tag: Any) -> str:
+        """Convert a tag to a backend-safe string."""
+        return str(tag)
 
-        with open(data_file, "rb") as f:
-            return pickle.load(f)
+    def _decode_tags(self, results: dict[str, dict[str, int]]) -> dict:
+        """Restore structured tags from backend result labels."""
+        return results
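To see how the refactored contract fits together, here is a minimal subclass sketch. This is not code from the package: the `BellSampler` class and its method bodies are hypothetical, bundle construction is deliberately elided, and only the constructor signature, the three abstract methods, `_curr_circuits`, `_dispatch_circuits_and_process_results`, and the two counters come from the diff above. The import path follows the file location `divi/qprog/quantum_program.py` shown in the manifest.

from queue import Queue
from typing import Any

from divi.backends import CircuitRunner
from divi.circuits import CircuitBundle
from divi.qprog.quantum_program import QuantumProgram


class BellSampler(QuantumProgram):
    """Hypothetical subclass illustrating the 0.6.0 QuantumProgram contract."""

    def __init__(
        self,
        backend: CircuitRunner,
        seed: int | None = None,
        progress_queue: Queue | None = None,
        **kwargs,
    ):
        # The base class wires up the backend, seed, counters, and reporter.
        super().__init__(backend, seed=seed, progress_queue=progress_queue, **kwargs)

    def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
        # Construction of CircuitBundle objects is application-specific
        # and not shown in this diff, so it is elided in this sketch.
        raise NotImplementedError("bundle construction elided in this sketch")

    def _post_process_results(self, results: dict, **kwargs) -> Any:
        # `results` maps decoded tags to shot histograms, per the base class.
        return results

    def run(self, **kwargs) -> tuple[int, float]:
        # Populate the circuits consumed by _prepare_and_send_circuits(),
        # then let the base class handle submission, polling, tag decoding,
        # and post-processing in one call.
        self._curr_circuits = self._generate_circuits(**kwargs)
        self._dispatch_circuits_and_process_results(**kwargs)
        # The base class accumulates these counters across executions.
        return self.total_circuit_count, self.total_run_time

Whether the dispatch path polls a cloud job or reads results directly is decided by `ExecutionResult.job_id`, so the same subclass works against both synchronous and asynchronous backends.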