qoro-divi 0.2.0b1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. divi/__init__.py +1 -2
  2. divi/backends/__init__.py +9 -0
  3. divi/backends/_circuit_runner.py +70 -0
  4. divi/backends/_execution_result.py +70 -0
  5. divi/backends/_parallel_simulator.py +486 -0
  6. divi/backends/_qoro_service.py +663 -0
  7. divi/backends/_qpu_system.py +101 -0
  8. divi/backends/_results_processing.py +133 -0
  9. divi/circuits/__init__.py +8 -0
  10. divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
  11. divi/circuits/_cirq/_parser.py +110 -0
  12. divi/circuits/_cirq/_qasm_export.py +78 -0
  13. divi/circuits/_core.py +369 -0
  14. divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
  15. divi/circuits/_qasm_validation.py +694 -0
  16. divi/qprog/__init__.py +24 -6
  17. divi/qprog/_expectation.py +181 -0
  18. divi/qprog/_hamiltonians.py +281 -0
  19. divi/qprog/algorithms/__init__.py +14 -0
  20. divi/qprog/algorithms/_ansatze.py +356 -0
  21. divi/qprog/algorithms/_qaoa.py +572 -0
  22. divi/qprog/algorithms/_vqe.py +249 -0
  23. divi/qprog/batch.py +383 -73
  24. divi/qprog/checkpointing.py +556 -0
  25. divi/qprog/exceptions.py +9 -0
  26. divi/qprog/optimizers.py +1014 -43
  27. divi/qprog/quantum_program.py +231 -413
  28. divi/qprog/variational_quantum_algorithm.py +995 -0
  29. divi/qprog/workflows/__init__.py +10 -0
  30. divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
  31. divi/qprog/workflows/_qubo_partitioning.py +220 -0
  32. divi/qprog/workflows/_vqe_sweep.py +560 -0
  33. divi/reporting/__init__.py +7 -0
  34. divi/reporting/_pbar.py +127 -0
  35. divi/reporting/_qlogger.py +68 -0
  36. divi/reporting/_reporter.py +133 -0
  37. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/METADATA +43 -15
  38. qoro_divi-0.5.0.dist-info/RECORD +43 -0
  39. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/WHEEL +1 -1
  40. qoro_divi-0.5.0.dist-info/licenses/LICENSES/.license-header +3 -0
  41. divi/_pbar.py +0 -73
  42. divi/circuits.py +0 -139
  43. divi/exp/cirq/_lexer.py +0 -126
  44. divi/exp/cirq/_parser.py +0 -889
  45. divi/exp/cirq/_qasm_export.py +0 -37
  46. divi/exp/cirq/_qasm_import.py +0 -35
  47. divi/exp/cirq/exception.py +0 -21
  48. divi/exp/scipy/_cobyla.py +0 -342
  49. divi/exp/scipy/pyprima/LICENCE.txt +0 -28
  50. divi/exp/scipy/pyprima/__init__.py +0 -263
  51. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  52. divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
  53. divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
  54. divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
  55. divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
  56. divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
  57. divi/exp/scipy/pyprima/cobyla/update.py +0 -331
  58. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  59. divi/exp/scipy/pyprima/common/_bounds.py +0 -41
  60. divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
  61. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
  62. divi/exp/scipy/pyprima/common/_project.py +0 -224
  63. divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
  64. divi/exp/scipy/pyprima/common/consts.py +0 -48
  65. divi/exp/scipy/pyprima/common/evaluate.py +0 -101
  66. divi/exp/scipy/pyprima/common/history.py +0 -39
  67. divi/exp/scipy/pyprima/common/infos.py +0 -30
  68. divi/exp/scipy/pyprima/common/linalg.py +0 -452
  69. divi/exp/scipy/pyprima/common/message.py +0 -336
  70. divi/exp/scipy/pyprima/common/powalg.py +0 -131
  71. divi/exp/scipy/pyprima/common/preproc.py +0 -393
  72. divi/exp/scipy/pyprima/common/present.py +0 -5
  73. divi/exp/scipy/pyprima/common/ratio.py +0 -56
  74. divi/exp/scipy/pyprima/common/redrho.py +0 -49
  75. divi/exp/scipy/pyprima/common/selectx.py +0 -346
  76. divi/interfaces.py +0 -25
  77. divi/parallel_simulator.py +0 -258
  78. divi/qlogger.py +0 -119
  79. divi/qoro_service.py +0 -343
  80. divi/qprog/_mlae.py +0 -182
  81. divi/qprog/_qaoa.py +0 -440
  82. divi/qprog/_vqe.py +0 -275
  83. divi/qprog/_vqe_sweep.py +0 -144
  84. divi/utils.py +0 -116
  85. qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
  86. /divi/{qem.py → circuits/qem.py} +0 -0
  87. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSE +0 -0
  88. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
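Most of this release is a reorganization of the package: the flat `divi` modules move into dedicated `divi.backends`, `divi.circuits`, `divi.qprog`, and `divi.reporting` subpackages, the vendored `divi/exp` copies of the cirq parser and scipy/pyprima optimizer code are dropped, and new `algorithms` and `workflows` subpackages appear. As a quick orientation, here is a hypothetical migration sketch for user imports, based only on the import lines that change in the `quantum_program.py` diff below; the exact public re-export surface of 0.5.0 is an assumption.

    # Imports removed in this diff (0.2.0b1 layout):
    # from divi import QoroService
    # from divi.interfaces import CircuitRunner
    # from divi.qoro_service import JobStatus
    # from divi.circuits import Circuit, MetaCircuit

    # Imports added in this diff (0.5.0 layout):
    from divi.backends import CircuitRunner, JobStatus  # replaces divi.interfaces / divi.qoro_service
    from divi.circuits import CircuitBundle  # CircuitBundle supersedes Circuit / MetaCircuit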
divi/qprog/quantum_program.py
@@ -2,492 +2,310 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-import logging
-import pickle
 from abc import ABC, abstractmethod
-from functools import partial
+from http import HTTPStatus
 from queue import Queue
-from typing import Optional
+from threading import Event
+from typing import Any
+from warnings import warn
 
-import numpy as np
-from qiskit.result import marginal_counts, sampled_expectation_value
-from scipy.optimize import OptimizeResult, minimize
+import requests
 
-from divi import QoroService
-from divi.circuits import Circuit, MetaCircuit
-from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
-from divi.interfaces import CircuitRunner
-from divi.qem import _NoMitigation
-from divi.qoro_service import JobStatus
-from divi.qprog.optimizers import Optimizer
-
-logger = logging.getLogger(__name__)
+from divi.backends import CircuitRunner, JobStatus
+from divi.backends._execution_result import ExecutionResult
+from divi.circuits import CircuitBundle
+from divi.qprog.exceptions import _CancelledError
+from divi.reporting import LoggingProgressReporter, QueueProgressReporter
 
 
 class QuantumProgram(ABC):
+    """Abstract base class for quantum programs.
+
+    This class defines the interface and provides common functionality for quantum algorithms.
+    It handles circuit execution, result processing, and data persistence.
+
+    Subclasses must implement:
+    - run(): Execute the quantum algorithm
+    - _generate_circuits(): Generate quantum circuits for execution
+    - _post_process_results(): Process execution results
+
+    Attributes:
+        backend (CircuitRunner): The quantum circuit execution backend.
+        _seed (int | None): Random seed for reproducible results.
+        _progress_queue (Queue | None): Queue for progress reporting.
+        _circuits (list): List of circuits to be executed.
+    """
+
     def __init__(
         self,
         backend: CircuitRunner,
-        seed: Optional[int] = None,
-        progress_queue: Optional[Queue] = None,
+        seed: int | None = None,
+        progress_queue: Queue | None = None,
         **kwargs,
     ):
-        """
-        Initializes the QuantumProgram class.
-
-        If a child class represents a hybrid quantum-classical algorithm,
-        the instance variables `n_layers` and `n_params` must be set, where:
-        - `n_layers` is the number of layers in the quantum circuit.
-        - `n_params` is the number of parameters per layer.
-
-        For exotic algorithms where these variables may not be applicable,
-        the `_initialize_params` method should be overridden to set the parameters.
+        """Initialize the QuantumProgram.
 
         Args:
-            backend (CircuitRunner): An instance of a CircuitRunner object, which
-                can either be ParallelSimulator or QoroService.
-            seed (int): A seed for numpy's random number generator, which will
-                be used for the parameter initialization.
-                Defaults to None.
-            progress_queue (Queue): a queue for progress bar updates.
-
-            **kwargs: Additional keyword arguments that influence behaviour.
-                - grouping_strategy (Optional[Any]): A strategy for grouping operations, used in Pennylane's transforms.
-                    Defaults to None.
-                - qem_protocol (Optional[QEMProtocol]): the quantum error mitigation protocol to apply.
-                    Must be of type QEMProtocol. Defaults to None.
-
-            The following key values are reserved for internal use and should not be set by the user:
-                - losses (Optional[list]): A list to initialize the `losses` attribute. Defaults to an empty list.
-                - final_params (Optional[list]): A list to initialize the `final_params` attribute. Defaults to an empty list.
-
+            backend (CircuitRunner): Quantum circuit execution backend.
+            seed (int | None): Random seed for reproducible results. Defaults to None.
+            progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+            **kwargs: Additional keyword arguments for subclasses.
+                program_id (str | None): Program identifier for progress reporting in batch
+                    operations. If provided along with progress_queue, enables queue-based
+                    progress reporting.
         """
-
-        # Shared Variables
-        self.losses = kwargs.pop("losses", [])
-        self.final_params = kwargs.pop("final_params", [])
-
-        self.circuits: list[Circuit] = []
-
+        self.backend = backend
+        self._seed = seed
+        self._progress_queue = progress_queue
         self._total_circuit_count = 0
         self._total_run_time = 0.0
-        self._curr_params = []
-
-        self._seed = seed
-        self._rng = np.random.default_rng(self._seed)
+        self._curr_circuits = []
+        self._current_execution_result = None
 
-        # Lets child classes adapt their optimization
-        # step for grad calculation routine
-        self._grad_mode = False
-
-        self.backend = backend
-        self.job_id = kwargs.get("job_id", None)
-
-        self._progress_queue = progress_queue
+        # --- Progress Reporting ---
+        self.program_id = kwargs.get("program_id", None)
+        if progress_queue and self.program_id is not None:
+            self.reporter = QueueProgressReporter(self.program_id, progress_queue)
+        else:
+            self.reporter = LoggingProgressReporter()
 
-        # Needed for Pennylane's transforms
-        self._grouping_strategy = kwargs.pop("grouping_strategy", None)
+    @abstractmethod
+    def run(self, **kwargs) -> tuple[int, float]:
+        """Execute the quantum algorithm.
 
-        self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
+        Args:
+            **kwargs: Additional keyword arguments for subclasses.
 
-        self._meta_circuit_factory = partial(
-            MetaCircuit,
-            grouping_strategy=self._grouping_strategy,
-            qem_protocol=self._qem_protocol,
-        )
+        Returns:
+            tuple[int, float]: A tuple containing:
+                - int: Total number of circuits executed
+                - float: Total runtime in seconds
+        """
+        pass
 
-    @property
-    def total_circuit_count(self):
-        return self._total_circuit_count
+    @abstractmethod
+    def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
+        """Generate quantum circuits for execution.
 
-    @property
-    def total_run_time(self):
-        return self._total_run_time
+        This method should generate and return a list of CircuitBundle objects based on
+        the current algorithm state. The circuits will be executed by the backend.
 
-    @property
-    def meta_circuits(self):
-        return self._meta_circuits
+        Args:
+            **kwargs: Additional keyword arguments for circuit generation.
 
-    @abstractmethod
-    def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
+        Returns:
+            list[CircuitBundle]: List of CircuitBundle objects to be executed.
+        """
         pass
 
     @abstractmethod
-    def _generate_circuits(self, **kwargs):
-        pass
-
-    def _initialize_params(self):
-        self._curr_params = np.array(
-            [
-                self._rng.uniform(0, 2 * np.pi, self.n_layers * self.n_params)
-                for _ in range(self.optimizer.n_param_sets)
-            ]
-        )
+    def _post_process_results(self, results: dict, **kwargs) -> Any:
+        """Process execution results.
 
-    def _run_optimization_circuits(self, store_data, data_file):
-        self.circuits[:] = []
+        Args:
+            results (dict): Raw results from circuit execution.
 
-        self._generate_circuits()
+        Returns:
+            Any: Processed results specific to the algorithm.
+        """
+        pass
 
-        losses = self._dispatch_circuits_and_process_results(
-            store_data=store_data, data_file=data_file
-        )
+    def _set_cancellation_event(self, event: Event):
+        """Set a cancellation event for graceful program termination.
 
-        return losses
+        This method is called by batch runners to provide a mechanism
+        for stopping the optimization loop cleanly when requested.
 
-    def _update_mc_params(self):
-        """
-        Updates the parameters based on previous MC iteration.
+        Args:
+            event (Event): Threading Event object that signals cancellation when set.
         """
+        self._cancellation_event = event
 
-        if self.current_iteration == 0:
-            self._initialize_params()
+    @property
+    def total_circuit_count(self) -> int:
+        """Get the total number of circuits executed.
 
-            self.current_iteration += 1
+        Returns:
+            int: Cumulative count of circuits submitted for execution.
+        """
+        return self._total_circuit_count
 
-            return
+    @property
+    def total_run_time(self) -> float:
+        """Get the total runtime across all circuit executions.
 
-        self._curr_params = self.optimizer.compute_new_parameters(
-            self._curr_params,
-            self.current_iteration,
-            losses=self.losses[-1],
-            rng=self._rng,
-        )
+        Returns:
+            float: Cumulative execution time in seconds.
+        """
+        return self._total_run_time
 
-        self.current_iteration += 1
+    def _prepare_and_send_circuits(self, **kwargs) -> ExecutionResult:
+        """Prepare circuits for execution and submit them to the backend.
 
-    def _prepare_and_send_circuits(self):
+        Returns:
+            ExecutionResult: Result from circuit submission. For async backends,
+                contains job_id. For sync backends, contains results directly.
+        """
         job_circuits = {}
 
-        for circuit in self.circuits:
-            for tag, qasm_circuit in zip(circuit.tags, circuit.qasm_circuits):
-                job_circuits[tag] = qasm_circuit
+        for bundle in self._curr_circuits:
+            for executable in bundle.executables:
+                job_circuits[executable.tag] = executable.qasm
 
         self._total_circuit_count += len(job_circuits)
 
-        backend_output = self.backend.submit_circuits(job_circuits)
+        execution_result = self.backend.submit_circuits(job_circuits, **kwargs)
 
-        if isinstance(self.backend, QoroService):
-            self._curr_service_job_id = backend_output
+        return execution_result
 
-        return backend_output
-
-    def _dispatch_circuits_and_process_results(self, store_data=False, data_file=None):
-        """
-        Run an iteration of the program. The outputs are stored in the Program object.
-        Optionally, the data can be stored in a file.
+    def _track_runtime(self, response):
+        """Extract and track runtime from a backend response.
 
         Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
+            response: Backend response containing runtime information.
+                Can be a dict or a list of responses.
         """
+        if isinstance(response, dict):
+            self._total_run_time += float(response["run_time"])
+        elif isinstance(response, list):
+            self._total_run_time += sum(float(r.json()["run_time"]) for r in response)
 
-        results = self._prepare_and_send_circuits()
-
-        def add_run_time(response):
-            if isinstance(response, dict):
-                self._total_run_time += float(response["run_time"])
-            elif isinstance(response, list):
-                self._total_run_time += sum(float(r["run_time"]) for r in response)
-
-        if isinstance(self.backend, QoroService):
-            status = self.backend.poll_job_status(
-                self._curr_service_job_id,
-                loop_until_complete=True,
-                on_complete=add_run_time,
-                **(
-                    {
-                        "pbar_update_fn": lambda n_polls: self._progress_queue.put(
-                            {
-                                "job_id": self.job_id,
-                                "progress": 0,
-                                "poll_attempt": n_polls,
-                            }
-                        )
-                    }
-                    if self._progress_queue is not None
-                    else {}
-                ),
-            )
-
-            if status != JobStatus.COMPLETED:
-                raise Exception(
-                    "Job has not completed yet, cannot post-process results"
-                )
-
-            results = self.backend.get_job_results(self._curr_service_job_id)
-
-        results = {r["label"]: r["results"] for r in results}
-
-        result = self._post_process_results(results)
-
-        if store_data:
-            self.save_iteration(data_file)
-
-        return result
-
-    def _post_process_results(
-        self, results: dict[str, dict[str, int]]
-    ) -> dict[int, float]:
-        """
-        Post-process the results of the quantum problem.
+    def _wait_for_qoro_job_completion(
+        self, execution_result: ExecutionResult
+    ) -> list[dict]:
+        """Wait for a QoroService job to complete and return results.
 
         Args:
-            results (dict): The shot histograms of the quantum execution step.
-                The keys should be strings of format {param_id}_*_{measurement_group_id}.
-                i.e. An underscore-separated bunch of metadata, starting always with
-                the index of some parameter and ending with the index of some measurement group.
-                Any extra piece of metadata that might be relevant to the specific application can
-                be kept in the middle.
+            execution_result: The ExecutionResult from circuit submission.
 
         Returns:
-            (dict) The energies for each parameter set grouping, where the dict keys
-            correspond to the parameter indices.
-        """
-
-        losses = {}
-        measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups
-
-        for p in range(self._curr_params.shape[0]):
-            # Extract relevant entries from the execution results dict
-            param_results = {k: v for k, v in results.items() if k.startswith(f"{p}_")}
-
-            # Compute the marginal results for each observable
-            marginal_results = []
-            for group_idx, curr_measurement_group in enumerate(measurement_groups):
-                group_results = {
-                    k: v
-                    for k, v in param_results.items()
-                    if k.endswith(f"_{group_idx}")
-                }
-
-                curr_marginal_results = []
-                for observable in curr_measurement_group:
-                    intermediate_exp_values = [
-                        sampled_expectation_value(
-                            marginal_counts(shots_dict, observable.wires.tolist()),
-                            "Z" * len(observable.wires),
-                        )
-                        for shots_dict in group_results.values()
-                    ]
-
-                    mitigated_exp_value = self._qem_protocol.postprocess_results(
-                        intermediate_exp_values
-                    )
+            list[dict]: The job results from the backend.
 
-                    curr_marginal_results.append(mitigated_exp_value)
-
-                marginal_results.append(
-                    curr_marginal_results
-                    if len(curr_marginal_results) > 1
-                    else curr_marginal_results[0]
-                )
-
-            pl_loss = (
-                self._meta_circuits["cost_circuit"]
-                .postprocessing_fn(marginal_results)[0]
-                .item()
-            )
-
-            losses[p] = pl_loss + self.loss_constant
-
-        return losses
-
-    def run(self, store_data=False, data_file=None):
+        Raises:
+            Exception: If job fails or doesn't complete.
         """
-        Run the QAOA problem. The outputs are stored in the QAOA object. Optionally, the data can be stored in a file.
-
-        Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
-        """
-
-        if self._progress_queue is not None:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "message": "Finished Setup",
-                    "progress": 0,
-                }
+        job_id = execution_result.job_id
+        if job_id is None:
+            raise ValueError("ExecutionResult must have a job_id for async completion")
+
+        # Build the poll callback if reporter is available
+        if hasattr(self, "reporter"):
+            update_function = lambda n_polls, status: self.reporter.info(
+                message="",
+                poll_attempt=n_polls,
+                max_retries=self.backend.max_retries,
+                service_job_id=job_id,
+                job_status=status,
             )
         else:
-            logger.info("Finished Setup")
-
-        if self.optimizer == Optimizer.MONTE_CARLO:
-            while self.current_iteration < self.max_iterations:
-
-                self._update_mc_params()
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": f"⛰️ Sampling from Loss Lansdscape ⛰️",
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration} circuits\r"
-                    )
-
-                curr_losses = self._run_optimization_circuits(store_data, data_file)
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "progress": 1,
-                        }
-                    )
-                else:
-                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
-
-                self.losses.append(curr_losses)
-
-            self.final_params[:] = np.atleast_2d(self._curr_params)
-
-        elif self.optimizer in (
-            Optimizer.NELDER_MEAD,
-            Optimizer.L_BFGS_B,
-            Optimizer.COBYLA,
-        ):
-
-            def cost_fn(params):
-                task_name = "💸 Computing Cost 💸"
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
-                    )
-
-                self._curr_params = np.atleast_2d(params)
-
-                losses = self._run_optimization_circuits(store_data, data_file)
-
-                return losses[0]
-
-            def grad_fn(params):
-                self._grad_mode = True
-
-                task_name = "📈 Computing Gradients 📈"
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
-                    )
-
-                shift_mask = self.optimizer.compute_parameter_shift_mask(len(params))
-
-                self._curr_params = shift_mask + params
-
-                exp_vals = self._run_optimization_circuits(store_data, data_file)
-
-                grads = np.zeros_like(params)
-                for i in range(len(params)):
-                    grads[i] = 0.5 * (exp_vals[2 * i] - exp_vals[2 * i + 1])
-
-                self._grad_mode = False
-
-                return grads
+            update_function = None
+
+        # Poll until complete
+        status = self.backend.poll_job_status(
+            execution_result,
+            loop_until_complete=True,
+            on_complete=self._track_runtime,
+            verbose=False,  # Disable the default logger in QoroService
+            progress_callback=update_function,
+        )
 
-        def _iteration_counter(intermediate_result: OptimizeResult):
-            self.losses.append({0: intermediate_result.fun})
+        if status == JobStatus.FAILED:
+            raise RuntimeError(f"Job {job_id} has failed")
+
+        if status == JobStatus.CANCELLED:
+            # If cancellation was requested (e.g., by ProgramBatch), raise _CancelledError
+            # so it's handled gracefully. Otherwise, raise RuntimeError for unexpected cancellation.
+            if (
+                hasattr(self, "_cancellation_event")
+                and self._cancellation_event
+                and self._cancellation_event.is_set()
+            ):
+                raise _CancelledError(f"Job {job_id} was cancelled")
+            raise RuntimeError(f"Job {job_id} was cancelled")
+
+        if status != JobStatus.COMPLETED:
+            raise Exception("Job has not completed yet, cannot post-process results")
+        completed_result = self.backend.get_job_results(execution_result)
+        return completed_result.results
+
+    def cancel_unfinished_job(self):
+        """Cancel the currently running cloud job if one exists.
+
+        This method attempts to cancel the job associated with the current
+        ExecutionResult. It is best-effort and will log warnings for any errors
+        (e.g., job already completed, permission denied) without raising exceptions.
+
+        This is typically called by ProgramBatch when handling cancellation
+        to ensure cloud jobs are cancelled before local threads terminate.
+        """
 
-            self.final_params[:] = np.atleast_2d(intermediate_result.x)
+        if self._current_execution_result is None:
+            warn("Cannot cancel job: no current execution result", stacklevel=2)
+            return
 
-            self.current_iteration += 1
+        if self._current_execution_result.job_id is None:
+            warn("Cannot cancel job: execution result has no job_id", stacklevel=2)
+            return
 
-            if self._progress_queue is not None:
-                self._progress_queue.put(
-                    {
-                        "job_id": self.job_id,
-                        "progress": 1,
-                    }
+        try:
+            self.backend.cancel_job(self._current_execution_result)
+        except requests.exceptions.HTTPError as e:
+            # Check if this is an expected error (job already completed/failed/cancelled)
+            if (
+                hasattr(e, "response")
+                and e.response is not None
+                and e.response.status_code == HTTPStatus.CONFLICT
+            ):
+                # 409 Conflict means job is already in a terminal state - this is expected
+                # in race conditions where job completes before we can cancel it.
+                if hasattr(self, "reporter"):
+                    self.reporter.info(
+                        f"Job {self._current_execution_result.job_id} already completed or cancelled"
                     )
-            else:
-                logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
-
-            if (
-                self.optimizer == Optimizer.COBYLA
-                and intermediate_result.nit + 1 == self.max_iterations
-            ):
-                raise StopIteration
-
-        if self.max_iterations is None or self.optimizer == Optimizer.COBYLA:
-            # COBYLA perceive maxiter as maxfev so we need
-            # to use the callback fn for counting instead.
-            maxiter = None
             else:
-            # Need to add one more iteration for Nelder-Mead's simplex initialization step
-            maxiter = (
-                self.max_iterations + 1
-                if self.optimizer == Optimizer.NELDER_MEAD
-                else self.max_iterations
+                # Unexpected error (403 Forbidden, 404 Not Found, etc.) - report it
+                if hasattr(self, "reporter"):
+                    self.reporter.info(
+                        f"Failed to cancel job {self._current_execution_result.job_id}: {e}"
+                    )
+        except Exception as e:
+            # Other unexpected errors - report them
+            if hasattr(self, "reporter"):
+                self.reporter.info(
+                    f"Failed to cancel job {self._current_execution_result.job_id}: {e}"
                 )
 
-        self._initialize_params()
-        self._minimize_res = minimize(
-            fun=cost_fn,
-            x0=self._curr_params[0],
-            method=(
-                cobyla_fn
-                if self.optimizer == Optimizer.COBYLA
-                else self.optimizer.value
-            ),
-            jac=grad_fn if self.optimizer == Optimizer.L_BFGS_B else None,
-            callback=_iteration_counter,
-            options={"maxiter": maxiter},
-        )
+    def _dispatch_circuits_and_process_results(self, **kwargs):
+        """Run an iteration of the program.
 
-        if self._progress_queue:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "progress": 0,
-                    "final_status": "Success",
-                }
-            )
-        else:
-            logger.info(f"Finished Optimization!")
+        The outputs are stored in the Program object.
 
-        return self._total_circuit_count, self._total_run_time
+        Args:
+            **kwargs: Additional keyword arguments for circuit submission and result processing.
 
-    def save_iteration(self, data_file):
+        Returns:
+            Any: Processed results from _post_process_results.
         """
-        Save the current iteration of the program to a file.
+        execution_result = self._prepare_and_send_circuits(**kwargs)
 
-        Args:
-            data_file (str): The file to save the iteration to.
-        """
+        # Store the execution result for potential cancellation
+        self._current_execution_result = execution_result
 
-        with open(data_file, "wb") as f:
-            pickle.dump(self, f)
+        try:
+            # For async backends, poll for results
+            if execution_result.job_id is not None:
+                results = self._wait_for_qoro_job_completion(execution_result)
+            else:
+                # For sync backends, results are already available
+                results = execution_result.results
+                if results is None:
+                    raise ValueError("ExecutionResult has neither results nor job_id")
 
-    @staticmethod
-    def import_iteration(data_file):
-        """
-        Import an iteration of the program from a file.
+            results = {r["label"]: r["results"] for r in results}
 
-        Args:
-            data_file (str): The file to import the iteration from.
-        """
+            result = self._post_process_results(results, **kwargs)
 
-        with open(data_file, "rb") as f:
-            return pickle.load(f)
+            return result
+        finally:
+            # Clear the execution result after processing
+            self._current_execution_result = None
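For orientation, the new `QuantumProgram` contract can be read directly off the diff: subclasses implement `run()`, `_generate_circuits()`, and `_post_process_results()`, while the base class owns submission (`_prepare_and_send_circuits`), polling (`_wait_for_qoro_job_completion`), runtime accounting, and cancellation. A minimal hypothetical subclass sketch follows; `MyProgram` is not part of the package, and the `CircuitBundle` construction is elided because its constructor is not shown in this diff.

    from typing import Any

    from divi.circuits import CircuitBundle
    from divi.qprog.quantum_program import QuantumProgram


    class MyProgram(QuantumProgram):
        """Hypothetical subclass illustrating the three abstract hooks."""

        def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
            # Real code would build CircuitBundle objects whose executables
            # carry (tag, qasm) pairs, which _prepare_and_send_circuits
            # flattens into the {tag: qasm} dict submitted to the backend.
            return []

        def _post_process_results(self, results: dict, **kwargs) -> Any:
            # `results` arrives re-keyed as {label: shot_histogram} by
            # _dispatch_circuits_and_process_results.
            return results

        def run(self, **kwargs) -> tuple[int, float]:
            self._curr_circuits = self._generate_circuits()
            self._dispatch_circuits_and_process_results()
            return self.total_circuit_count, self.total_run_time

Cancellation follows the same split: a batch runner hands the program a `threading.Event` via `_set_cancellation_event()`, so a cancelled cloud job surfaces as `_CancelledError` rather than a `RuntimeError`, and `cancel_unfinished_job()` is the best-effort cloud-side counterpart that tolerates a 409 Conflict when the job has already reached a terminal state.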