qoro-divi 0.3.5__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -2,211 +2,35 @@
  #
  # SPDX-License-Identifier: Apache-2.0

- import logging
  import pickle
  from abc import ABC, abstractmethod
- from functools import lru_cache, partial
- from itertools import groupby
  from queue import Queue
  from threading import Event
+ from typing import Any

- import numpy as np
- import pennylane as qml
- from scipy.optimize import OptimizeResult
+ from divi.backends import CircuitRunner, JobStatus
+ from divi.circuits import Circuit

- from divi.backends import CircuitRunner, JobStatus, QoroService
- from divi.circuits import Circuit, MetaCircuit
- from divi.circuits.qem import _NoMitigation
- from divi.qprog.exceptions import _CancelledError
- from divi.qprog.optimizers import ScipyMethod, ScipyOptimizer
- from divi.reporting import LoggingProgressReporter, QueueProgressReporter

- logger = logging.getLogger(__name__)
-
-
- def _get_structural_key(obs: qml.operation.Operation) -> tuple[str, ...]:
- """Generates a hashable, wire-independent key from an observable's structure.
-
- This function is used to create a canonical representation of an observable
- based on its constituent Pauli operators, ignoring the wires they act on.
- This key is ideal for caching computed eigenvalues, as observables with the
- same structure (e.g., PauliX(0) @ PauliZ(1) and PauliX(2) @ PauliZ(3))
- share the same eigenvalues. It maps PauliX and PauliY to PauliZ because
- they are all isospectral (have eigenvalues [1, -1]).
-
- Args:
- obs: A PennyLane observable (e.g., qml.PauliZ(0), qml.PauliX(0) @ qml.PauliY(1)).
-
- Returns:
- A tuple of strings representing the structure of the observable,
- e.g., ('PauliZ',) or ('PauliZ', 'PauliZ').
- """
-
- # Pennylane returns the same eigenvalues for PauliX and PauliY
- # since it handles diagonalizing gates internally anyway
- name_map = {
- "PauliY": "PauliZ",
- "PauliX": "PauliZ",
- "PauliZ": "PauliZ",
- "Identity": "Identity",
- }
-
- if isinstance(obs, qml.ops.Prod):
- # Recursively build a tuple of operator names
- return tuple(name_map[o.name] for o in obs.operands)
-
- # For single operators, return a single-element tuple
- return (name_map[obs.name],)
-
-
- @lru_cache(maxsize=512)
- def _get_eigvals_from_key(key: tuple[str, ...]) -> np.ndarray:
- """Computes and caches eigenvalues based on a structural key.
-
- This function takes a key generated by `_get_structural_key` and computes
- the eigenvalues of the corresponding tensor product of operators. The results
- are memoized using @lru_cache to avoid redundant calculations.
-
- Args:
- key: A tuple of strings representing the observable's structure.
-
- Returns:
- A NumPy array containing the eigenvalues of the observable.
- """
-
- # Define a mapping from name to the base eigenvalue array
- eigvals_map = {
- "PauliZ": np.array([1, -1], dtype=np.int8),
- "Identity": np.array([1, 1], dtype=np.int8),
- }
-
- # Start with the eigenvalues of the first operator in the key
- final_eigvals = eigvals_map[key[0]]
-
- # Iteratively compute the kronecker product for the rest
- for op_name in key[1:]:
- final_eigvals = np.kron(final_eigvals, eigvals_map[op_name])
-
- return final_eigvals
-
-
- def _batched_expectation(shots_dicts, observables, wire_order):
- """Efficiently calculates expectation values for multiple observables across multiple shot histograms.
-
- This function is optimized to compute expectation values in a fully vectorized
- manner, minimizing Python loops. It operates in four main steps:
- 1. Aggregates all unique bitstrings measured across all histograms.
- 2. Builds a "reduced" eigenvalue matrix corresponding only to the unique states.
- 3. Builds a "reduced" probability matrix from the shot counts for each histogram.
- 4. Computes all expectation values with a single matrix multiplication.
-
- Args:
- shots_dicts (list[dict[str, int]]): A list of shot dictionaries (histograms),
- where each dictionary maps a measured bitstring to its count.
- observables (list[qml.operation.Operation]): A list of PennyLane observables
- for which to calculate expectation values.
- wire_order (tuple[int, ...]): A tuple defining the order of wires, which maps
- the bitstring to the qubits. Note: This is typically the reverse of the
- qubit indices (e.g., (2, 1, 0) for a 3-qubit system).
-
- Returns:
- np.ndarray: A 2D NumPy array of shape (n_observables, n_shots) where
- result[i, j] is the expectation value of observables[i] for the
- histogram in shots_dicts[j].
- """
-
- n_histograms = len(shots_dicts)
- n_total_wires = len(wire_order)
- n_observables = len(observables)
-
- # --- 1. Aggregate all unique measured states across all shots ---
- all_measured_bitstrings = set()
- for sd in shots_dicts:
- all_measured_bitstrings.update(sd.keys())
-
- unique_bitstrings = sorted(list(all_measured_bitstrings))
- n_unique_states = len(unique_bitstrings)
-
- bitstring_to_idx_map = {bs: i for i, bs in enumerate(unique_bitstrings)}
-
- # --- 2. Build REDUCED Eigenvalue Matrix: (n_observables, n_unique_states) ---
- unique_states_int = np.array(
- [int(bs, 2) for bs in unique_bitstrings], dtype=np.uint64
- )
- reduced_eigvals_matrix = np.zeros((n_observables, n_unique_states))
- wire_map = {w: i for i, w in enumerate(wire_order)}
-
- powers_cache = {}
-
- for obs_idx, observable in enumerate(observables):
- obs_wires = observable.wires
- n_obs_wires = len(obs_wires)
-
- if n_obs_wires in powers_cache:
- powers = powers_cache[n_obs_wires]
- else:
- powers = 2 ** np.arange(n_obs_wires - 1, -1, -1, dtype=np.intp)
- powers_cache[n_obs_wires] = powers
-
- obs_wire_indices = np.array([wire_map[w] for w in obs_wires], dtype=np.uint32)
- eigvals = _get_eigvals_from_key(_get_structural_key(observable))
-
- # Vectorized mapping, but on the *reduced* set of states
- shifts = n_total_wires - 1 - obs_wire_indices
- bits = ((unique_states_int[:, np.newaxis] >> shifts) & 1).astype(np.intp)
- # powers = 2 ** np.arange(n_obs_wires - 1, -1, -1)
-
- # obs_state_indices = (bits * powers).sum(axis=1).astype(np.intp)
- obs_state_indices = np.dot(bits, powers)
-
- reduced_eigvals_matrix[obs_idx, :] = eigvals[obs_state_indices]
-
- # --- 3. Build REDUCED Probability Matrix: (n_shots, n_unique_states) ---
- reduced_prob_matrix = np.zeros((n_histograms, n_unique_states), dtype=np.float32)
- for i, shots_dict in enumerate(shots_dicts):
- total = sum(shots_dict.values())
-
- for bitstring, count in shots_dict.items():
- col_idx = bitstring_to_idx_map[bitstring]
- reduced_prob_matrix[i, col_idx] = count / total
-
- # --- 4. Compute Final Expectation Values ---
- # (n_shots, n_unique_states) @ (n_unique_states, n_observables)
- result = reduced_prob_matrix @ reduced_eigvals_matrix.T
-
- # Transpose to (n_observables, n_shots) as expected by the calling code
- return result.T
-
-
- def _compute_parameter_shift_mask(n_params):
- """
- Generate a binary matrix mask for the parameter shift rule.
- This mask is used to determine the shifts to apply to each parameter
- when computing gradients via the parameter shift rule in quantum algorithms.
-
- Args:
- n_params (int): The number of parameters in the quantum circuit.
-
- Returns:
- np.ndarray: A (2 * n_params, n_params) matrix where each row encodes
- the shift to apply to each parameter for a single evaluation.
- The values are multiples of 0.5 * pi, with alternating signs.
+ class QuantumProgram(ABC):
+ """Abstract base class for quantum programs.
+
+ This class defines the interface and provides common functionality for quantum algorithms.
+ It handles circuit execution, result processing, and data persistence.
+
+ Subclasses must implement:
+ - run(): Execute the quantum algorithm
+ - _generate_circuits(): Generate quantum circuits for execution
+ - _post_process_results(): Process execution results
+
+ Attributes:
+ backend (CircuitRunner): The quantum circuit execution backend.
+ _seed (int | None): Random seed for reproducible results.
+ _progress_queue (Queue | None): Queue for progress reporting.
+ _circuits (list): List of circuits to be executed.
+ _curr_service_job_id: Current service job ID for QoroService backends.
  """
- mask_arr = np.arange(0, 2 * n_params, 2)
- mask_arr[0] = 1
-
- binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
- np.float64
- )
-
- binary_matrix = binary_matrix.repeat(2, axis=0)
- binary_matrix[1::2] *= -1
- binary_matrix *= 0.5 * np.pi
-
- return binary_matrix

-
- class QuantumProgram(ABC):
  def __init__(
  self,
  backend: CircuitRunner,
@@ -214,192 +38,68 @@ class QuantumProgram(ABC):
  progress_queue: Queue | None = None,
  **kwargs,
  ):
- """
- Initializes the QuantumProgram class.
-
- If a child class represents a hybrid quantum-classical algorithm,
- the instance variables `n_layers` and `n_params` must be set, where:
- - `n_layers` is the number of layers in the quantum circuit.
- - `n_params` is the number of parameters per layer.
-
- For exotic algorithms where these variables may not be applicable,
- the `_initialize_params` method should be overridden to set the parameters.
+ """Initialize the QuantumProgram.

  Args:
- backend (CircuitRunner): An instance of a CircuitRunner object, which
- can either be ParallelSimulator or QoroService.
- seed (int): A seed for numpy's random number generator, which will
- be used for the parameter initialization.
- Defaults to None.
- progress_queue (Queue): a queue for progress bar updates.
-
- **kwargs: Additional keyword arguments that influence behaviour.
- - grouping_strategy (Literal["default", "wires", "qwc"]): A strategy for grouping operations, used in Pennylane's transforms.
- Defaults to None.
- qem_protocol (QEMProtocol, optional): the quantum error mitigation protocol to apply.
- Must be of type QEMProtocol. Defaults to None.
+ backend (CircuitRunner): Quantum circuit execution backend.
+ seed (int | None): Random seed for reproducible results. Defaults to None.
+ progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+ **kwargs: Additional keyword arguments for subclasses.
  """
-
- self._losses = []
- self._final_params = []
-
- self._circuits: list[Circuit] = []
-
- self._total_circuit_count = 0
- self._total_run_time = 0.0
- self._curr_params = None
-
- self._seed = seed
- self._rng = np.random.default_rng(self._seed)
-
- # Lets child classes adapt their optimization
- # step for grad calculation routine
- self._grad_mode = False
-
  self.backend = backend
-
- self.job_id = kwargs.get("job_id", None)
+ self._seed = seed
  self._progress_queue = progress_queue
- if progress_queue and self.job_id:
- self.reporter = QueueProgressReporter(self.job_id, progress_queue)
- else:
- self.reporter = LoggingProgressReporter()
-
- # Needed for Pennylane's transforms
- self._grouping_strategy = kwargs.pop("grouping_strategy", None)
-
- self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
-
- self._cancellation_event = None
-
- self._meta_circuit_factory = partial(
- MetaCircuit,
- grouping_strategy=self._grouping_strategy,
- qem_protocol=self._qem_protocol,
- )
-
- @property
- def total_circuit_count(self):
- """
- Get the total number of circuits executed so far.
-
- Returns:
- int: Cumulative count of circuits submitted for execution.
- """
- return self._total_circuit_count
-
- @property
- def total_run_time(self):
- """
- Get the total runtime across all circuit executions.
-
- Returns:
- float: Cumulative execution time in seconds.
- """
- return self._total_run_time
-
- @property
- def meta_circuits(self):
- """
- Get the meta-circuit templates used by this program.
-
- Returns:
- dict[str, MetaCircuit]: Dictionary mapping circuit names to their
- MetaCircuit templates.
- """
- return self._meta_circuits
-
- @property
- def n_params(self):
- """
- Get the total number of parameters in the quantum circuit.
-
- Returns:
- int: Total number of trainable parameters (n_layers * n_params_per_layer).
- """
- return self._n_params
-
- @property
- def circuits(self) -> list[Circuit]:
- """
- Get a copy of the generated circuits list.
-
- Returns:
- list[Circuit]: Copy of the circuits list. Modifications to this list
- will not affect the internal state.
- """
- return self._circuits.copy()
+ self._total_circuit_count = 0
+ self._total_run_time = 0.0
+ self._curr_circuits = []
+ self._curr_service_job_id = None

- @property
- def losses(self) -> list[dict]:
- """
- Get a copy of the optimization loss history.
+ @abstractmethod
+ def run(self, data_file: str | None = None, **kwargs) -> tuple[int, float]:
+ """Execute the quantum algorithm.

- Each entry is a dictionary mapping parameter indices to loss values.
+ Args:
+ data_file (str | None): The file to store the data in. If None, no data is stored. Defaults to None.
+ **kwargs: Additional keyword arguments for subclasses.

  Returns:
- list[dict]: Copy of the loss history. Modifications to this list
- will not affect the internal state.
+ tuple[int, float]: A tuple containing:
+ - int: Total number of circuits executed
+ - float: Total runtime in seconds
  """
- return self._losses.copy()
+ pass

- @property
- def final_params(self) -> list:
- """
- Get a copy of the final optimized parameters.
+ @abstractmethod
+ def _generate_circuits(self, **kwargs) -> list[Circuit]:
+ """Generate quantum circuits for execution.

- Returns:
- list: Copy of the final parameters. Modifications to this list
- will not affect the internal state.
- """
- return self._final_params.copy()
+ This method should generate and return a list of Circuit objects based on
+ the current algorithm state. The circuits will be executed by the backend.

- @property
- def initial_params(self) -> np.ndarray:
- """
- Get the current initial parameters.
+ Args:
+ **kwargs: Additional keyword arguments for circuit generation.

  Returns:
- np.ndarray: Current initial parameters. If not yet initialized,
- they will be generated automatically.
+ list[Circuit]: List of Circuit objects to be executed.
  """
- if self._curr_params is None:
- self._initialize_params()
- return self._curr_params.copy()
+ pass

- @initial_params.setter
- def initial_params(self, value: np.ndarray | None):
- """
- Set initial parameters.
+ @abstractmethod
+ def _post_process_results(self, results: dict, **kwargs) -> Any:
+ """Process execution results.

  Args:
- value (np.ndarray | None): Initial parameters with shape
- (n_param_sets, n_layers * n_params), or None to reset
- to uninitialized state.
+ results (dict): Raw results from circuit execution.

- Raises:
- ValueError: If parameters have incorrect shape.
+ Returns:
+ Any: Processed results specific to the algorithm.
  """
- if value is not None:
- self._validate_initial_params(value)
- self._curr_params = value.copy()
- else:
- # Reset to uninitialized state
- self._curr_params = None
-
- @abstractmethod
- def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
- pass
-
- @abstractmethod
- def _generate_circuits(self, **kwargs):
  pass

  def _set_cancellation_event(self, event: Event):
- """
- Set a cancellation event for graceful program termination.
+ """Set a cancellation event for graceful program termination.

- This internal method is called by a batch runner to provide a mechanism
+ This method is called by batch runners to provide a mechanism
  for stopping the optimization loop cleanly when requested.

  Args:
@@ -407,324 +107,125 @@ class QuantumProgram(ABC):
  """
  self._cancellation_event = event

- def get_expected_param_shape(self) -> tuple[int, int]:
- """
- Get the expected shape for initial parameters.
+ @property
+ def total_circuit_count(self) -> int:
+ """Get the total number of circuits executed.

  Returns:
- tuple[int, int]: Shape (n_param_sets, n_layers * n_params) that
- initial parameters should have for this quantum program.
- """
- return (self.optimizer.n_param_sets, self.n_layers * self.n_params)
-
- def _validate_initial_params(self, params: np.ndarray):
+ int: Cumulative count of circuits submitted for execution.
  """
- Validate user-provided initial parameters.
+ return self._total_circuit_count

- Args:
- params (np.ndarray): Parameters to validate.
+ @property
+ def total_run_time(self) -> float:
+ """Get the total runtime across all circuit executions.

- Raises:
- ValueError: If parameters have incorrect shape.
+ Returns:
+ float: Cumulative execution time in seconds.
  """
- expected_shape = self.get_expected_param_shape()
-
- if params.shape != expected_shape:
- raise ValueError(
- f"Initial parameters must have shape {expected_shape}, "
- f"got {params.shape}"
- )
+ return self._total_run_time

- def _initialize_params(self):
- """
- Initialize the circuit parameters randomly.
+ def _prepare_and_send_circuits(self, **kwargs):
+ """Prepare circuits for execution and submit them to the backend.

- Generates random parameters with values uniformly distributed between
- 0 and 2π. The number of parameter sets depends on the optimizer being used.
+ Returns:
+ Backend output from circuit submission.
  """
- total_params = self.n_layers * self.n_params
- self._curr_params = self._rng.uniform(
- 0, 2 * np.pi, (self.optimizer.n_param_sets, total_params)
- )
-
- def _run_optimization_circuits(self, store_data, data_file):
- self._circuits[:] = []
-
- self._generate_circuits()
-
- losses = self._dispatch_circuits_and_process_results(
- store_data=store_data, data_file=data_file
- )
-
- return losses
-
- def _prepare_and_send_circuits(self):
  job_circuits = {}

- for circuit in self._circuits:
+ for circuit in self._curr_circuits:
  for tag, qasm_circuit in zip(circuit.tags, circuit.qasm_circuits):
  job_circuits[tag] = qasm_circuit

  self._total_circuit_count += len(job_circuits)

- backend_output = self.backend.submit_circuits(job_circuits)
+ backend_output = self.backend.submit_circuits(job_circuits, **kwargs)

- if isinstance(self.backend, QoroService):
+ if self.backend.is_async:
  self._curr_service_job_id = backend_output

  return backend_output

- def _dispatch_circuits_and_process_results(self, store_data=False, data_file=None):
- """
- Run an iteration of the program. The outputs are stored in the Program object.
- Optionally, the data can be stored in a file.
+ def _track_runtime(self, response):
+ """Extract and track runtime from a backend response.

  Args:
- store_data (bool): Whether to store the data for the iteration
- data_file (str): The file to store the data in
+ response: Backend response containing runtime information.
+ Can be a dict or a list of responses.
  """
+ if isinstance(response, dict):
+ self._total_run_time += float(response["run_time"])
+ elif isinstance(response, list):
+ self._total_run_time += sum(float(r.json()["run_time"]) for r in response)

- results = self._prepare_and_send_circuits()
+ def _wait_for_qoro_job_completion(self, job_id: str) -> list[dict]:
+ """Wait for a QoroService job to complete and return results.

- def add_run_time(response):
- if isinstance(response, dict):
- self._total_run_time += float(response["run_time"])
- elif isinstance(response, list):
- self._total_run_time += sum(
- float(r.json()["run_time"]) for r in response
- )
+ Args:
+ job_id: The QoroService job identifier.

- if isinstance(self.backend, QoroService):
+ Returns:
+ list[dict]: The job results from the backend.
+
+ Raises:
+ Exception: If job fails or doesn't complete.
+ """
+ # Build the poll callback if reporter is available
+ if hasattr(self, "reporter"):
  update_function = lambda n_polls, status: self.reporter.info(
  message="",
  poll_attempt=n_polls,
  max_retries=self.backend.max_retries,
- service_job_id=self._curr_service_job_id,
+ service_job_id=job_id,
  job_status=status,
  )
+ else:
+ update_function = None
+
+ # Poll until complete
+ status = self.backend.poll_job_status(
+ job_id,
+ loop_until_complete=True,
+ on_complete=self._track_runtime,
+ verbose=False, # Disable the default logger in QoroService
+ poll_callback=update_function,
+ )

- status = self.backend.poll_job_status(
- self._curr_service_job_id,
- loop_until_complete=True,
- on_complete=add_run_time,
- verbose=False, # Disable the default logger in QoroService
- poll_callback=update_function, # Use the new, more generic name
- )
-
- if status != JobStatus.COMPLETED:
- raise Exception(
- "Job has not completed yet, cannot post-process results"
- )
-
- results = self.backend.get_job_results(self._curr_service_job_id)
-
- results = {r["label"]: r["results"] for r in results}
-
- result = self._post_process_results(results)
-
- if store_data:
- self.save_iteration(data_file)
+ if status != JobStatus.COMPLETED:
+ raise Exception("Job has not completed yet, cannot post-process results")
+ return self.backend.get_job_results(job_id)

- return result
+ def _dispatch_circuits_and_process_results(
+ self, data_file: str | None = None, **kwargs
+ ):
+ """Run an iteration of the program.

- def _post_process_results(
- self, results: dict[str, dict[str, int]]
- ) -> dict[int, float]:
- """
- Post-process the results of the quantum problem.
+ The outputs are stored in the Program object.
+ Optionally, the data can be stored in a file.

  Args:
- results (dict): The shot histograms of the quantum execution step.
- The keys should be strings of format {param_id}_*_{measurement_group_id}.
- i.e. An underscore-separated bunch of metadata, starting always with
- the index of some parameter and ending with the index of some measurement group.
- Any extra piece of metadata that might be relevant to the specific application can
- be kept in the middle.
+ data_file (str | None): The file to store the data in. If None, no data is stored. Defaults to None.
+ **kwargs: Additional keyword arguments for circuit submission and result processing.

  Returns:
- (dict) The energies for each parameter set grouping, where the dict keys
- correspond to the parameter indices.
- """
- if not (self._cancellation_event and self._cancellation_event.is_set()):
- self.reporter.info(
- message="Post-processing output", iteration=self.current_iteration
- )
-
- losses = {}
- measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups
-
- # Define key functions for both levels of grouping
- get_param_id = lambda item: int(item[0].split("_")[0])
- get_qem_id = lambda item: int(item[0].split("_")[1].split(":")[1])
-
- # Group the pre-sorted results by parameter ID.
- for p, param_group_iterator in groupby(results.items(), key=get_param_id):
- param_group_iterator = list(param_group_iterator)
-
- shots_by_qem_idx = zip(
- *{
- gid: [value for _, value in group]
- for gid, group in groupby(param_group_iterator, key=get_qem_id)
- }.values()
- )
-
- marginal_results = []
- for shots_dicts, curr_measurement_group in zip(
- shots_by_qem_idx, measurement_groups
- ):
- if hasattr(self, "cost_hamiltonian"):
- wire_order = tuple(reversed(self.cost_hamiltonian.wires))
- else:
- wire_order = tuple(
- reversed(range(len(next(iter(shots_dicts[0].keys())))))
- )
-
- expectation_matrix = _batched_expectation(
- shots_dicts, curr_measurement_group, wire_order
- )
-
- # expectation_matrix[i, j] = expectation value for observable i, histogram j
- curr_marginal_results = []
- for obs_idx in range(len(curr_measurement_group)):
- intermediate_exp_values = expectation_matrix[obs_idx, :]
- mitigated_exp_value = self._qem_protocol.postprocess_results(
- intermediate_exp_values
- )
- curr_marginal_results.append(mitigated_exp_value)
-
- marginal_results.append(
- curr_marginal_results
- if len(curr_marginal_results) > 1
- else curr_marginal_results[0]
- )
-
- pl_loss = (
- self._meta_circuits["cost_circuit"]
- .postprocessing_fn(marginal_results)[0]
- .item()
- )
-
- losses[p] = pl_loss + self.loss_constant
-
- return losses
-
- def _perform_final_computation(self):
- """
- Perform final computations after optimization completes.
-
- This is an optional hook method that subclasses can override to perform
- any post-optimization processing, such as extracting solutions, running
- final measurements, or computing additional metrics.
-
- Note:
- The default implementation does nothing. Subclasses should override
- this method if they need post-optimization processing.
- """
- pass
-
- def run(self, store_data=False, data_file=None):
- """
- Run the QAOA problem. The outputs are stored in the QAOA object. Optionally, the data can be stored in a file.
-
- Args:
- store_data (bool): Whether to store the data for the iteration
- data_file (str): The file to store the data in
+ Any: Processed results from _post_process_results.
  """
+ results = self._prepare_and_send_circuits(**kwargs)

- def cost_fn(params):
- self.reporter.info(
- message="💸 Computing Cost 💸", iteration=self.current_iteration
- )
-
- self._curr_params = np.atleast_2d(params)
-
- losses = self._run_optimization_circuits(store_data, data_file)
-
- losses = np.fromiter(losses.values(), dtype=np.float64)
-
- if params.ndim > 1:
- return losses
- else:
- return losses.item()
+ if self.backend.is_async:
+ results = self._wait_for_qoro_job_completion(self._curr_service_job_id)

- self._grad_shift_mask = _compute_parameter_shift_mask(
- self.n_layers * self.n_params
- )
-
- def grad_fn(params):
- self._grad_mode = True
-
- self.reporter.info(
- message="📈 Computing Gradients 📈", iteration=self.current_iteration
- )
-
- self._curr_params = self._grad_shift_mask + params
-
- exp_vals = self._run_optimization_circuits(store_data, data_file)
- exp_vals_arr = np.fromiter(exp_vals.values(), dtype=np.float64)
-
- pos_shifts = exp_vals_arr[::2]
- neg_shifts = exp_vals_arr[1::2]
- grads = 0.5 * (pos_shifts - neg_shifts)
-
- self._grad_mode = False
-
- return grads
-
- def _iteration_counter(intermediate_result: OptimizeResult):
-
- self._losses.append(
- dict(
- zip(
- range(len(intermediate_result.x)),
- np.atleast_1d(intermediate_result.fun),
- )
- )
- )
-
- self.current_iteration += 1
-
- self.reporter.update(iteration=self.current_iteration)
-
- if self._cancellation_event and self._cancellation_event.is_set():
- raise _CancelledError("Cancellation requested by batch.")
-
- if (
- isinstance(self.optimizer, ScipyOptimizer)
- and self.optimizer.method == ScipyMethod.COBYLA
- and intermediate_result.nit + 1 == self.max_iterations
- ):
- raise StopIteration
-
- self.reporter.info(message="Finished Setup")
-
- self._initialize_params()
-
- try:
- self._minimize_res = self.optimizer.optimize(
- cost_fn=cost_fn,
- initial_params=self._curr_params,
- callback_fn=_iteration_counter,
- jac=grad_fn,
- maxiter=self.max_iterations,
- rng=self._rng,
- )
- except _CancelledError:
- # The optimizer was stopped by our callback. This is not a real
- # error, just a signal to exit this task cleanly.
- return self._total_circuit_count, self._total_run_time
-
- self._final_params[:] = np.atleast_2d(self._minimize_res.x)
+ results = {r["label"]: r["results"] for r in results}

- self._perform_final_computation()
+ result = self._post_process_results(results, **kwargs)

- self.reporter.info(message="Finished successfully!")
+ if data_file is not None:
+ self.save_iteration(data_file)

- return self._total_circuit_count, self._total_run_time
+ return result

- def save_iteration(self, data_file):
- """
- Save the current state of the quantum program to a file.
+ def save_iteration(self, data_file: str):
+ """Save the current state of the quantum program to a file.

  Serializes the entire QuantumProgram instance including parameters,
  losses, and circuit history using pickle.
@@ -736,14 +237,12 @@
  The file is written in binary mode and can be restored using
  `import_iteration()`.
  """
-
  with open(data_file, "wb") as f:
  pickle.dump(self, f)

  @staticmethod
- def import_iteration(data_file):
- """
- Load a previously saved quantum program state from a file.
+ def import_iteration(data_file: str):
+ """Load a previously saved quantum program state from a file.

  Deserializes a QuantumProgram instance that was saved using `save_iteration()`.

@@ -754,6 +253,5 @@
  QuantumProgram: The restored QuantumProgram instance with all its state,
  including parameters, losses, and circuit history.
  """
-
  with open(data_file, "rb") as f:
  return pickle.load(f)
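
For orientation, the sketch below shows how a subclass would plug into the slimmed-down 0.4.0 interface: _generate_circuits() and _post_process_results() supply the algorithm-specific pieces, while run() fills self._curr_circuits and delegates submission, async polling, and optional persistence to _dispatch_circuits_and_process_results(). This is a minimal illustration, not code from the package: the divi.qprog import path for QuantumProgram and the Circuit(tags=..., qasm_circuits=...) constructor are assumptions, since the diff only shows that circuits expose tags and qasm_circuits, and the shape of the per-label results is assumed to be a shot histogram.

from divi.circuits import Circuit
from divi.qprog import QuantumProgram  # assumed import path; not shown in this diff


class BellSampler(QuantumProgram):
    """Toy program: run one Bell-state circuit and report the most frequent bitstring."""

    BELL_QASM = (
        'OPENQASM 2.0;\ninclude "qelib1.inc";\n'
        "qreg q[2];\ncreg c[2];\n"
        "h q[0];\ncx q[0],q[1];\n"
        "measure q -> c;\n"
    )

    def _generate_circuits(self, **kwargs) -> list[Circuit]:
        # Hypothetical constructor call: the diff only shows that Circuit objects
        # expose .tags and .qasm_circuits, not how they are built.
        return [Circuit(tags=["0_bell"], qasm_circuits=[self.BELL_QASM])]

    def _post_process_results(self, results: dict, **kwargs):
        # results maps circuit labels to raw backend results (assumed here to be
        # {bitstring: count} histograms); pick the most frequent bitstring per label.
        return {label: max(hist, key=hist.get) for label, hist in results.items()}

    def run(self, data_file: str | None = None, **kwargs) -> tuple[int, float]:
        # Populate the circuits the base class will submit, then let the shared
        # dispatch helper handle submission, async polling, and optional saving.
        self._curr_circuits = self._generate_circuits(**kwargs)
        self.solution = self._dispatch_circuits_and_process_results(
            data_file=data_file, **kwargs
        )
        return self._total_circuit_count, self._total_run_time

With an instance built on any CircuitRunner backend, calling run(data_file="state.pkl") would execute the circuits and pickle the program state via save_iteration(), which QuantumProgram.import_iteration("state.pkl") can restore later; for asynchronous backends the same call path waits on the service job through _wait_for_qoro_job_completion() before post-processing.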