qoro-divi 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -2,433 +2,256 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-import logging
 import pickle
 from abc import ABC, abstractmethod
-from functools import partial
-from itertools import groupby
 from queue import Queue
+from threading import Event
+from typing import Any
 
-import numpy as np
-from pennylane.measurements import ExpectationMP
-from scipy.optimize import OptimizeResult
+from divi.backends import CircuitRunner, JobStatus
+from divi.circuits import Circuit
 
-from divi.backends import CircuitRunner, JobStatus, QoroService
-from divi.circuits import Circuit, MetaCircuit
-from divi.circuits.qem import _NoMitigation
-from divi.qprog.optimizers import ScipyMethod, ScipyOptimizer
-from divi.reporting import LoggingProgressReporter, QueueProgressReporter
 
-logger = logging.getLogger(__name__)
-
-
-def _compute_parameter_shift_mask(n_params):
-    """
-    Generate a binary matrix mask for the parameter shift rule.
-    This mask is used to determine the shifts to apply to each parameter
-    when computing gradients via the parameter shift rule in quantum algorithms.
-
-    Args:
-        n_params (int): The number of parameters in the quantum circuit.
-
-    Returns:
-        np.ndarray: A (2 * n_params, n_params) matrix where each row encodes
-            the shift to apply to each parameter for a single evaluation.
-            The values are multiples of 0.5 * pi, with alternating signs.
+class QuantumProgram(ABC):
+    """Abstract base class for quantum programs.
+
+    This class defines the interface and provides common functionality for quantum algorithms.
+    It handles circuit execution, result processing, and data persistence.
+
+    Subclasses must implement:
+    - run(): Execute the quantum algorithm
+    - _generate_circuits(): Generate quantum circuits for execution
+    - _post_process_results(): Process execution results
+
+    Attributes:
+        backend (CircuitRunner): The quantum circuit execution backend.
+        _seed (int | None): Random seed for reproducible results.
+        _progress_queue (Queue | None): Queue for progress reporting.
+        _circuits (list): List of circuits to be executed.
+        _curr_service_job_id: Current service job ID for QoroService backends.
     """
-    mask_arr = np.arange(0, 2 * n_params, 2)
-    mask_arr[0] = 1
-
-    binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
-        np.float64
-    )
-
-    binary_matrix = binary_matrix.repeat(2, axis=0)
-    binary_matrix[1::2] *= -1
-    binary_matrix *= 0.5 * np.pi
-
-    return binary_matrix
 
-
-class QuantumProgram(ABC):
     def __init__(
         self,
         backend: CircuitRunner,
         seed: int | None = None,
         progress_queue: Queue | None = None,
-        has_final_computation: bool = False,
         **kwargs,
     ):
-        """
-        Initializes the QuantumProgram class.
-
-        If a child class represents a hybrid quantum-classical algorithm,
-        the instance variables `n_layers` and `n_params` must be set, where:
-        - `n_layers` is the number of layers in the quantum circuit.
-        - `n_params` is the number of parameters per layer.
-
-        For exotic algorithms where these variables may not be applicable,
-        the `_initialize_params` method should be overridden to set the parameters.
+        """Initialize the QuantumProgram.
 
         Args:
-            backend (CircuitRunner): An instance of a CircuitRunner object, which
-                can either be ParallelSimulator or QoroService.
-            seed (int): A seed for numpy's random number generator, which will
-                be used for the parameter initialization.
-                Defaults to None.
-            progress_queue (Queue): a queue for progress bar updates.
-            has_final_computation (bool): Whether the program includes a final
-                computation step after optimization. This affects progress reporting.
-
-            **kwargs: Additional keyword arguments that influence behaviour.
-                - grouping_strategy (Literal["default", "wires", "qwc"]): A strategy for grouping operations, used in Pennylane's transforms.
-                    Defaults to None.
-                qem_protocol (QEMProtocol, optional): the quantum error mitigation protocol to apply.
-                    Must be of type QEMProtocol. Defaults to None.
-
-            The following key values are reserved for internal use and should not be set by the user:
-                losses (list, optional): A list to initialize the `losses` attribute. Defaults to an empty list.
-                final_params (list, optional): A list to initialize the `final_params` attribute. Defaults to an empty list.
-
+            backend (CircuitRunner): Quantum circuit execution backend.
+            seed (int | None): Random seed for reproducible results. Defaults to None.
+            progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+            **kwargs: Additional keyword arguments for subclasses.
         """
-
-        # Shared Variables
-        self.losses = kwargs.pop("losses", [])
-        self.final_params = kwargs.pop("final_params", [])
-
-        self.circuits: list[Circuit] = []
-
+        self.backend = backend
+        self._seed = seed
+        self._progress_queue = progress_queue
         self._total_circuit_count = 0
         self._total_run_time = 0.0
-        self._curr_params = []
+        self._curr_circuits = []
+        self._curr_service_job_id = None
 
-        self._seed = seed
-        self._rng = np.random.default_rng(self._seed)
+    @abstractmethod
+    def run(self, data_file: str | None = None, **kwargs) -> tuple[int, float]:
+        """Execute the quantum algorithm.
 
-        # Lets child classes adapt their optimization
-        # step for grad calculation routine
-        self._grad_mode = False
+        Args:
+            data_file (str | None): The file to store the data in. If None, no data is stored. Defaults to None.
+            **kwargs: Additional keyword arguments for subclasses.
 
-        self.backend = backend
+        Returns:
+            tuple[int, float]: A tuple containing:
+                - int: Total number of circuits executed
+                - float: Total runtime in seconds
+        """
+        pass
 
-        self.job_id = kwargs.get("job_id", None)
-        self._progress_queue = progress_queue
-        if progress_queue and self.job_id:
-            self.reporter = QueueProgressReporter(
-                self.job_id, progress_queue, has_final_computation=has_final_computation
-            )
-        else:
-            self.reporter = LoggingProgressReporter()
+    @abstractmethod
+    def _generate_circuits(self, **kwargs) -> list[Circuit]:
+        """Generate quantum circuits for execution.
 
-        # Needed for Pennylane's transforms
-        self._grouping_strategy = kwargs.pop("grouping_strategy", None)
+        This method should generate and return a list of Circuit objects based on
+        the current algorithm state. The circuits will be executed by the backend.
 
-        self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
+        Args:
+            **kwargs: Additional keyword arguments for circuit generation.
 
-        self._meta_circuit_factory = partial(
-            MetaCircuit,
-            grouping_strategy=self._grouping_strategy,
-            qem_protocol=self._qem_protocol,
-        )
+        Returns:
+            list[Circuit]: List of Circuit objects to be executed.
+        """
+        pass
 
-    @property
-    def total_circuit_count(self):
-        return self._total_circuit_count
+    @abstractmethod
+    def _post_process_results(self, results: dict, **kwargs) -> Any:
+        """Process execution results.
 
-    @property
-    def total_run_time(self):
-        return self._total_run_time
+        Args:
+            results (dict): Raw results from circuit execution.
 
-    @property
-    def meta_circuits(self):
-        return self._meta_circuits
+        Returns:
+            Any: Processed results specific to the algorithm.
+        """
+        pass
 
-    @property
-    def n_params(self):
-        return self._n_params
+    def _set_cancellation_event(self, event: Event):
+        """Set a cancellation event for graceful program termination.
 
-    @abstractmethod
-    def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
-        pass
+        This method is called by batch runners to provide a mechanism
+        for stopping the optimization loop cleanly when requested.
 
-    @abstractmethod
-    def _generate_circuits(self, **kwargs):
-        pass
+        Args:
+            event (Event): Threading Event object that signals cancellation when set.
+        """
+        self._cancellation_event = event
 
-    def _initialize_params(self):
-        self._curr_params = np.array(
-            [
-                self._rng.uniform(0, 2 * np.pi, self.n_layers * self.n_params)
-                for _ in range(self.optimizer.n_param_sets)
-            ]
-        )
+    @property
+    def total_circuit_count(self) -> int:
+        """Get the total number of circuits executed.
 
-    def _run_optimization_circuits(self, store_data, data_file):
-        self.circuits[:] = []
+        Returns:
+            int: Cumulative count of circuits submitted for execution.
+        """
+        return self._total_circuit_count
 
-        self._generate_circuits()
+    @property
+    def total_run_time(self) -> float:
+        """Get the total runtime across all circuit executions.
 
-        losses = self._dispatch_circuits_and_process_results(
-            store_data=store_data, data_file=data_file
-        )
+        Returns:
+            float: Cumulative execution time in seconds.
+        """
+        return self._total_run_time
 
-        return losses
+    def _prepare_and_send_circuits(self, **kwargs):
+        """Prepare circuits for execution and submit them to the backend.
 
-    def _prepare_and_send_circuits(self):
+        Returns:
+            Backend output from circuit submission.
+        """
         job_circuits = {}
 
-        for circuit in self.circuits:
+        for circuit in self._curr_circuits:
             for tag, qasm_circuit in zip(circuit.tags, circuit.qasm_circuits):
                 job_circuits[tag] = qasm_circuit
 
         self._total_circuit_count += len(job_circuits)
 
-        backend_output = self.backend.submit_circuits(job_circuits)
+        backend_output = self.backend.submit_circuits(job_circuits, **kwargs)
 
-        if isinstance(self.backend, QoroService):
+        if self.backend.is_async:
            self._curr_service_job_id = backend_output
 
         return backend_output
 
-    def _dispatch_circuits_and_process_results(self, store_data=False, data_file=None):
-        """
-        Run an iteration of the program. The outputs are stored in the Program object.
-        Optionally, the data can be stored in a file.
+    def _track_runtime(self, response):
+        """Extract and track runtime from a backend response.
 
         Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
+            response: Backend response containing runtime information.
+                Can be a dict or a list of responses.
         """
+        if isinstance(response, dict):
+            self._total_run_time += float(response["run_time"])
+        elif isinstance(response, list):
+            self._total_run_time += sum(float(r.json()["run_time"]) for r in response)
 
-        results = self._prepare_and_send_circuits()
+    def _wait_for_qoro_job_completion(self, job_id: str) -> list[dict]:
+        """Wait for a QoroService job to complete and return results.
 
-        def add_run_time(response):
-            if isinstance(response, dict):
-                self._total_run_time += float(response["run_time"])
-            elif isinstance(response, list):
-                self._total_run_time += sum(
-                    float(r.json()["run_time"]) for r in response
-                )
+        Args:
+            job_id: The QoroService job identifier.
 
-        if isinstance(self.backend, QoroService):
+        Returns:
+            list[dict]: The job results from the backend.
+
+        Raises:
+            Exception: If job fails or doesn't complete.
+        """
+        # Build the poll callback if reporter is available
+        if hasattr(self, "reporter"):
             update_function = lambda n_polls, status: self.reporter.info(
                 message="",
                 poll_attempt=n_polls,
                 max_retries=self.backend.max_retries,
-                service_job_id=self._curr_service_job_id,
+                service_job_id=job_id,
                 job_status=status,
             )
+        else:
+            update_function = None
+
+        # Poll until complete
+        status = self.backend.poll_job_status(
+            job_id,
+            loop_until_complete=True,
+            on_complete=self._track_runtime,
+            verbose=False,  # Disable the default logger in QoroService
+            poll_callback=update_function,
+        )
 
-            status = self.backend.poll_job_status(
-                self._curr_service_job_id,
-                loop_until_complete=True,
-                on_complete=add_run_time,
-                verbose=False,  # Disable the default logger in QoroService
-                poll_callback=update_function,  # Use the new, more generic name
-            )
-
-            if status != JobStatus.COMPLETED:
-                raise Exception(
-                    "Job has not completed yet, cannot post-process results"
-                )
-
-            results = self.backend.get_job_results(self._curr_service_job_id)
-
-        results = {r["label"]: r["results"] for r in results}
-
-        result = self._post_process_results(results)
-
-        if store_data:
-            self.save_iteration(data_file)
+        if status != JobStatus.COMPLETED:
+            raise Exception("Job has not completed yet, cannot post-process results")
+        return self.backend.get_job_results(job_id)
 
-        return result
+    def _dispatch_circuits_and_process_results(
+        self, data_file: str | None = None, **kwargs
+    ):
+        """Run an iteration of the program.
 
-    def _post_process_results(
-        self, results: dict[str, dict[str, int]]
-    ) -> dict[int, float]:
-        """
-        Post-process the results of the quantum problem.
+        The outputs are stored in the Program object.
+        Optionally, the data can be stored in a file.
 
         Args:
-            results (dict): The shot histograms of the quantum execution step.
-                The keys should be strings of format {param_id}_*_{measurement_group_id}.
-                i.e. An underscore-separated bunch of metadata, starting always with
-                the index of some parameter and ending with the index of some measurement group.
-                Any extra piece of metadata that might be relevant to the specific application can
-                be kept in the middle.
+            data_file (str | None): The file to store the data in. If None, no data is stored. Defaults to None.
+            **kwargs: Additional keyword arguments for circuit submission and result processing.
 
         Returns:
-            (dict) The energies for each parameter set grouping, where the dict keys
-            correspond to the parameter indices.
+            Any: Processed results from _post_process_results.
         """
+        results = self._prepare_and_send_circuits(**kwargs)
 
-        losses = {}
-        measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups
+        if self.backend.is_async:
+            results = self._wait_for_qoro_job_completion(self._curr_service_job_id)
 
-        # Define key functions for both levels of grouping
-        get_param_id = lambda item: int(item[0].split("_")[0])
-        get_qem_id = lambda item: int(item[0].split("_")[1].split(":")[1])
-
-        # Group the pre-sorted results by parameter ID.
-        for p, param_group_iterator in groupby(results.items(), key=get_param_id):
-            param_group_iterator = list(param_group_iterator)
+        results = {r["label"]: r["results"] for r in results}
 
-            shots_by_qem_idx = zip(
-                *{
-                    gid: [value for _, value in group]
-                    for gid, group in groupby(param_group_iterator, key=get_qem_id)
-                }.values()
-            )
+        result = self._post_process_results(results, **kwargs)
 
-            marginal_results = []
-            for shots_dicts, curr_measurement_group in zip(
-                shots_by_qem_idx, measurement_groups
-            ):
-                if hasattr(self, "cost_hamiltonian"):
-                    wire_order = tuple(reversed(self.cost_hamiltonian.wires))
-                else:
-                    wire_order = tuple(
-                        reversed(range(len(next(iter(shots_dicts[0].keys())))))
-                    )
-
-                curr_marginal_results = []
-                for observable in curr_measurement_group:
-
-                    intermediate_exp_values = [
-                        ExpectationMP(observable).process_counts(shots_dict, wire_order)
-                        for shots_dict in shots_dicts
-                    ]
-
-                    mitigated_exp_value = self._qem_protocol.postprocess_results(
-                        intermediate_exp_values
-                    )
-
-                    curr_marginal_results.append(mitigated_exp_value)
-
-                marginal_results.append(
-                    curr_marginal_results
-                    if len(curr_marginal_results) > 1
-                    else curr_marginal_results[0]
-                )
-
-            pl_loss = (
-                self._meta_circuits["cost_circuit"]
-                .postprocessing_fn(marginal_results)[0]
-                .item()
-            )
+        if data_file is not None:
+            self.save_iteration(data_file)
 
-            losses[p] = pl_loss + self.loss_constant
+        return result
 
-        return losses
+    def save_iteration(self, data_file: str):
+        """Save the current state of the quantum program to a file.
 
-    def run(self, store_data=False, data_file=None):
-        """
-        Run the QAOA problem. The outputs are stored in the QAOA object. Optionally, the data can be stored in a file.
+        Serializes the entire QuantumProgram instance including parameters,
+        losses, and circuit history using pickle.
 
         Args:
-            store_data (bool): Whether to store the data for the iteration
-            data_file (str): The file to store the data in
-        """
-
-        def cost_fn(params):
-            self.reporter.info(
-                message="💸 Computing Cost 💸", iteration=self.current_iteration
-            )
-
-            self._curr_params = np.atleast_2d(params)
-
-            losses = self._run_optimization_circuits(store_data, data_file)
+            data_file (str): Path to the file where the program state will be saved.
 
-            losses = np.fromiter(losses.values(), dtype=np.float64)
-
-            if params.ndim > 1:
-                return losses
-            else:
-                return losses.item()
-
-        self._grad_shift_mask = _compute_parameter_shift_mask(
-            self.n_layers * self.n_params
-        )
-
-        def grad_fn(params):
-            self._grad_mode = True
-
-            self.reporter.info(
-                message="📈 Computing Gradients 📈", iteration=self.current_iteration
-            )
-
-            self._curr_params = self._grad_shift_mask + params
-
-            exp_vals = self._run_optimization_circuits(store_data, data_file)
-            exp_vals_arr = np.fromiter(exp_vals.values(), dtype=np.float64)
-
-            pos_shifts = exp_vals_arr[::2]
-            neg_shifts = exp_vals_arr[1::2]
-            grads = 0.5 * (pos_shifts - neg_shifts)
-
-            self._grad_mode = False
-
-            return grads
-
-        def _iteration_counter(intermediate_result: OptimizeResult):
-            self.losses.append(
-                dict(
-                    zip(
-                        range(len(intermediate_result.x)),
-                        np.atleast_1d(intermediate_result.fun),
-                    )
-                )
-            )
-
-            self.final_params[:] = np.atleast_2d(intermediate_result.x)
-
-            self.current_iteration += 1
-
-            self.reporter.update(iteration=self.current_iteration)
-
-            if (
-                isinstance(self.optimizer, ScipyOptimizer)
-                and self.optimizer.method == ScipyMethod.COBYLA
-                and intermediate_result.nit + 1 == self.max_iterations
-            ):
-                raise StopIteration
-
-        self.reporter.info(message="Finished Setup")
-
-        self._initialize_params()
-        self._minimize_res = self.optimizer.optimize(
-            cost_fn=cost_fn,
-            initial_params=self._curr_params,
-            callback_fn=_iteration_counter,
-            jac=grad_fn,
-            maxiter=self.max_iterations,
-            rng=self._rng,
-        )
-        self.final_params[:] = np.atleast_2d(self._minimize_res.x)
-
-        self.reporter.info(message="Finished Optimization!")
-
-        return self._total_circuit_count, self._total_run_time
-
-    def save_iteration(self, data_file):
-        """
-        Save the current iteration of the program to a file.
-
-        Args:
-            data_file (str): The file to save the iteration to.
+        Note:
+            The file is written in binary mode and can be restored using
+            `import_iteration()`.
         """
-
         with open(data_file, "wb") as f:
             pickle.dump(self, f)
 
     @staticmethod
-    def import_iteration(data_file):
-        """
-        Import an iteration of the program from a file.
+    def import_iteration(data_file: str):
+        """Load a previously saved quantum program state from a file.
+
+        Deserializes a QuantumProgram instance that was saved using `save_iteration()`.
 
         Args:
-            data_file (str): The file to import the iteration from.
-        """
+            data_file (str): Path to the file containing the saved program state.
 
+        Returns:
+            QuantumProgram: The restored QuantumProgram instance with all its state,
+                including parameters, losses, and circuit history.
+        """
         with open(data_file, "rb") as f:
             return pickle.load(f)
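
For orientation, the new 0.4.0 interface shown above can be subclassed roughly as follows. This is a minimal sketch based only on the methods visible in this diff: the import path of QuantumProgram and the FixedCircuitsProgram class itself are assumptions for illustration, not part of the released package.

# Minimal sketch of a QuantumProgram subclass, based only on the interface
# visible in this diff. The QuantumProgram import path below is an assumption;
# adjust it to wherever the installed package actually exports the class.
from typing import Any

from divi.backends import CircuitRunner
from divi.circuits import Circuit
from divi.qprog import QuantumProgram  # assumed import path


class FixedCircuitsProgram(QuantumProgram):
    """Hypothetical subclass that submits a pre-built list of circuits once."""

    def __init__(self, circuits: list[Circuit], backend: CircuitRunner, **kwargs):
        super().__init__(backend=backend, **kwargs)
        self._input_circuits = circuits

    def _generate_circuits(self, **kwargs) -> list[Circuit]:
        # Nothing to build here; the circuits were supplied up front.
        return list(self._input_circuits)

    def _post_process_results(self, results: dict, **kwargs) -> Any:
        # `results` maps circuit labels to shot histograms
        # (see _dispatch_circuits_and_process_results above).
        return results

    def run(self, data_file: str | None = None, **kwargs) -> tuple[int, float]:
        # _prepare_and_send_circuits() iterates self._curr_circuits, so populate
        # it before dispatching.
        self._curr_circuits = self._generate_circuits()
        self._dispatch_circuits_and_process_results(data_file=data_file, **kwargs)
        return self.total_circuit_count, self.total_run_time

Compared with 0.3.4, the optimizer loop, parameter-shift gradients, and PennyLane post-processing no longer live in the base class; run() is delegated entirely to subclasses, and asynchronous backends are detected via backend.is_async instead of an isinstance check against QoroService.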