qoro-divi 0.2.0b1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. divi/__init__.py +1 -2
  2. divi/backends/__init__.py +9 -0
  3. divi/backends/_circuit_runner.py +70 -0
  4. divi/backends/_execution_result.py +70 -0
  5. divi/backends/_parallel_simulator.py +486 -0
  6. divi/backends/_qoro_service.py +663 -0
  7. divi/backends/_qpu_system.py +101 -0
  8. divi/backends/_results_processing.py +133 -0
  9. divi/circuits/__init__.py +8 -0
  10. divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
  11. divi/circuits/_cirq/_parser.py +110 -0
  12. divi/circuits/_cirq/_qasm_export.py +78 -0
  13. divi/circuits/_core.py +369 -0
  14. divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
  15. divi/circuits/_qasm_validation.py +694 -0
  16. divi/qprog/__init__.py +24 -6
  17. divi/qprog/_expectation.py +181 -0
  18. divi/qprog/_hamiltonians.py +281 -0
  19. divi/qprog/algorithms/__init__.py +14 -0
  20. divi/qprog/algorithms/_ansatze.py +356 -0
  21. divi/qprog/algorithms/_qaoa.py +572 -0
  22. divi/qprog/algorithms/_vqe.py +249 -0
  23. divi/qprog/batch.py +383 -73
  24. divi/qprog/checkpointing.py +556 -0
  25. divi/qprog/exceptions.py +9 -0
  26. divi/qprog/optimizers.py +1014 -43
  27. divi/qprog/quantum_program.py +231 -413
  28. divi/qprog/variational_quantum_algorithm.py +995 -0
  29. divi/qprog/workflows/__init__.py +10 -0
  30. divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
  31. divi/qprog/workflows/_qubo_partitioning.py +220 -0
  32. divi/qprog/workflows/_vqe_sweep.py +560 -0
  33. divi/reporting/__init__.py +7 -0
  34. divi/reporting/_pbar.py +127 -0
  35. divi/reporting/_qlogger.py +68 -0
  36. divi/reporting/_reporter.py +133 -0
  37. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/METADATA +43 -15
  38. qoro_divi-0.5.0.dist-info/RECORD +43 -0
  39. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info}/WHEEL +1 -1
  40. qoro_divi-0.5.0.dist-info/licenses/LICENSES/.license-header +3 -0
  41. divi/_pbar.py +0 -73
  42. divi/circuits.py +0 -139
  43. divi/exp/cirq/_lexer.py +0 -126
  44. divi/exp/cirq/_parser.py +0 -889
  45. divi/exp/cirq/_qasm_export.py +0 -37
  46. divi/exp/cirq/_qasm_import.py +0 -35
  47. divi/exp/cirq/exception.py +0 -21
  48. divi/exp/scipy/_cobyla.py +0 -342
  49. divi/exp/scipy/pyprima/LICENCE.txt +0 -28
  50. divi/exp/scipy/pyprima/__init__.py +0 -263
  51. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  52. divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
  53. divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
  54. divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
  55. divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
  56. divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
  57. divi/exp/scipy/pyprima/cobyla/update.py +0 -331
  58. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  59. divi/exp/scipy/pyprima/common/_bounds.py +0 -41
  60. divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
  61. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
  62. divi/exp/scipy/pyprima/common/_project.py +0 -224
  63. divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
  64. divi/exp/scipy/pyprima/common/consts.py +0 -48
  65. divi/exp/scipy/pyprima/common/evaluate.py +0 -101
  66. divi/exp/scipy/pyprima/common/history.py +0 -39
  67. divi/exp/scipy/pyprima/common/infos.py +0 -30
  68. divi/exp/scipy/pyprima/common/linalg.py +0 -452
  69. divi/exp/scipy/pyprima/common/message.py +0 -336
  70. divi/exp/scipy/pyprima/common/powalg.py +0 -131
  71. divi/exp/scipy/pyprima/common/preproc.py +0 -393
  72. divi/exp/scipy/pyprima/common/present.py +0 -5
  73. divi/exp/scipy/pyprima/common/ratio.py +0 -56
  74. divi/exp/scipy/pyprima/common/redrho.py +0 -49
  75. divi/exp/scipy/pyprima/common/selectx.py +0 -346
  76. divi/interfaces.py +0 -25
  77. divi/parallel_simulator.py +0 -258
  78. divi/qlogger.py +0 -119
  79. divi/qoro_service.py +0 -343
  80. divi/qprog/_mlae.py +0 -182
  81. divi/qprog/_qaoa.py +0 -440
  82. divi/qprog/_vqe.py +0 -275
  83. divi/qprog/_vqe_sweep.py +0 -144
  84. divi/utils.py +0 -116
  85. qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
  86. /divi/{qem.py → circuits/qem.py} +0 -0
  87. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSE +0 -0
  88. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.5.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
divi/qprog/variational_quantum_algorithm.py (new file)
@@ -0,0 +1,995 @@
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging
+ import pickle
+ from abc import abstractmethod
+ from datetime import datetime
+ from functools import partial
+ from itertools import groupby
+ from pathlib import Path
+ from queue import Queue
+ from typing import Any
+ from warnings import warn
+
+ import numpy as np
+ import numpy.typing as npt
+ import pennylane as qml
+ from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator
+ from scipy.optimize import OptimizeResult
+
+ from divi.backends import (
+     CircuitRunner,
+     convert_counts_to_probs,
+     reverse_dict_endianness,
+ )
+ from divi.circuits import CircuitBundle, MetaCircuit
+ from divi.circuits.qem import _NoMitigation
+ from divi.qprog._expectation import _batched_expectation
+ from divi.qprog._hamiltonians import convert_hamiltonian_to_pauli_string
+ from divi.qprog.checkpointing import (
+     PROGRAM_STATE_FILE,
+     CheckpointConfig,
+     _atomic_write,
+     _ensure_checkpoint_dir,
+     _get_checkpoint_subdir_path,
+     _load_and_validate_pydantic_model,
+     resolve_checkpoint_path,
+ )
+ from divi.qprog.exceptions import _CancelledError
+ from divi.qprog.optimizers import (
+     MonteCarloOptimizer,
+     Optimizer,
+     PymooOptimizer,
+     ScipyMethod,
+     ScipyOptimizer,
+ )
+ from divi.qprog.quantum_program import QuantumProgram
+
+ logger = logging.getLogger(__name__)
+
+
+ class SubclassState(BaseModel):
+     """Container for subclass-specific state."""
+
+     data: dict[str, Any] = Field(default_factory=dict)
+
+
+ class OptimizerConfig(BaseModel):
+     """Configuration for reconstructing an optimizer."""
+
+     type: str
+     config: dict[str, Any] = Field(default_factory=dict)
+
+
+ class ProgramState(BaseModel):
+     """Pydantic model for VariationalQuantumAlgorithm state."""
+
+     model_config = ConfigDict(from_attributes=True, populate_by_name=True)
+
+     # Metadata
+     program_type: str = Field(validation_alias="_serialized_program_type")
+     version: str = "1.0"
+     timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())
+
+     # Core Algorithm State (mapped to private attributes)
+     current_iteration: int
+     max_iterations: int
+     losses_history: list[dict[str, float]] = Field(validation_alias="_losses_history")
+     best_loss: float = Field(validation_alias="_best_loss")
+     best_probs: dict[str, float] = Field(validation_alias="_best_probs")
+     total_circuit_count: int = Field(validation_alias="_total_circuit_count")
+     total_run_time: float = Field(validation_alias="_total_run_time")
+     seed: int | None = Field(validation_alias="_seed")
+     grouping_strategy: str = Field(validation_alias="_grouping_strategy")
+
+     # Arrays
+     curr_params: list[list[float]] | None = Field(
+         default=None, validation_alias="_curr_params"
+     )
+     best_params: list[float] | None = Field(
+         default=None, validation_alias="_best_params"
+     )
+     final_params: list[float] | None = Field(
+         default=None, validation_alias="_final_params"
+     )
+
+     # Complex State (mapped to adapter properties)
+     rng_state_bytes: bytes | None = Field(
+         default=None, validation_alias="_serialized_rng_state"
+     )
+     optimizer_config: OptimizerConfig = Field(
+         validation_alias="_serialized_optimizer_config"
+     )
+     subclass_state: SubclassState = Field(validation_alias="_serialized_subclass_state")
+
+     @field_serializer("rng_state_bytes")
+     def serialize_bytes(self, v: bytes | None, _info):
+         return v.hex() if v is not None else None
+
+     @field_validator("rng_state_bytes", mode="before")
+     @classmethod
+     def validate_bytes(cls, v):
+         return bytes.fromhex(v) if isinstance(v, str) else v
+
+     @field_serializer("curr_params", "best_params", "final_params")
+     def serialize_arrays(self, v: npt.NDArray | list | None, _info):
+         if isinstance(v, np.ndarray):
+             return v.tolist()
+         return v
+
+     def restore(self, program: "VariationalQuantumAlgorithm") -> None:
+         """Apply this state object back to a program instance."""
+         # 1. Bulk restore standard attributes
+         for name, field in self.model_fields.items():
+             target_attr = field.validation_alias or name
+
+             # Skip adapter properties (they are read-only / calculated)
+             if target_attr.startswith("_serialized_"):
+                 continue
+
+             val = getattr(self, name)
+
+             # Handle numpy conversion
+             if "params" in target_attr and val is not None:
+                 val = np.array(val)
+
+             if hasattr(program, target_attr):
+                 setattr(program, target_attr, val)
+
+         # 2. Restore complex state
+         if self.rng_state_bytes:
+             program._rng.bit_generator.state = pickle.loads(self.rng_state_bytes)
+
+         program._load_subclass_state(self.subclass_state.data)
+
+
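ProgramState is populated straight from a live program via `ProgramState.model_validate(program)`: `from_attributes` mode reads each field through its `validation_alias`, so private attributes and read-only adapter properties feed the model directly. A minimal sketch of the same pattern, using stand-in names rather than Divi's classes:

    from pydantic import BaseModel, ConfigDict, Field

    class _Source:  # stand-in for the program object
        def __init__(self):
            self._best_loss = 0.25

        @property
        def _serialized_kind(self) -> str:  # read-only adapter property
            return type(self).__name__

    class _State(BaseModel):
        model_config = ConfigDict(from_attributes=True, populate_by_name=True)
        best_loss: float = Field(validation_alias="_best_loss")
        kind: str = Field(validation_alias="_serialized_kind")

    state = _State.model_validate(_Source())
    print(state.model_dump())  # {'best_loss': 0.25, 'kind': '_Source'}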
+ def _compute_parameter_shift_mask(n_params: int) -> npt.NDArray[np.float64]:
+     """
+     Generate a binary matrix mask for the parameter-shift rule.
+
+     The mask determines the shifts to apply to each parameter when
+     computing gradients via the parameter-shift rule.
+
+     Args:
+         n_params (int): The number of parameters in the quantum circuit.
+
+     Returns:
+         npt.NDArray[np.float64]: A (2 * n_params, n_params) matrix where each row
+             encodes the shift to apply to each parameter for a single evaluation.
+             The values are multiples of 0.5 * pi, with alternating signs.
+     """
+     # One power of two per parameter, so row k toggles only parameter k.
+     # (The previous arange(0, 2 * n_params, 2) construction produced non-powers
+     # of two, and hence multi-parameter shifts, for n_params >= 4.)
+     mask_arr = 2 ** np.arange(n_params)
+
+     binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
+         np.float64
+     )
+
+     binary_matrix = binary_matrix.repeat(2, axis=0)
+     binary_matrix[1::2] *= -1
+     binary_matrix *= 0.5 * np.pi
+
+     return binary_matrix
+
+
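To make the shift pattern concrete, here is the mask for two parameters, worked out from the implementation above:

    import numpy as np

    mask = _compute_parameter_shift_mask(2)
    print(mask / (0.5 * np.pi))
    # [[ 1.  0.]
    #  [-1.  0.]
    #  [ 0.  1.]
    #  [ 0. -1.]]
    # Rows 2k and 2k + 1 shift parameter k by +pi/2 and -pi/2 respectively,
    # so grad_k = 0.5 * (f(theta + (pi/2) e_k) - f(theta - (pi/2) e_k)),
    # which is exactly what grad_fn in run() computes.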
+ class VariationalQuantumAlgorithm(QuantumProgram):
+     """Base class for variational quantum algorithms.
+
+     This class provides the foundation for implementing variational quantum
+     algorithms in Divi. It handles circuit execution, parameter optimization,
+     and result management for algorithms that optimize parameterized quantum
+     circuits to minimize cost functions.
+
+     Variational algorithms work by:
+     1. Generating parameterized quantum circuits
+     2. Executing circuits on quantum hardware/simulators
+     3. Computing expectation values of cost Hamiltonians
+     4. Using classical optimizers to update parameters
+     5. Iterating until convergence
+
+     Attributes:
+         _losses_history (list[dict]): History of loss values during optimization.
+         _final_params (npt.NDArray[np.float64]): Final optimized parameters.
+         _best_params (npt.NDArray[np.float64]): Parameters that achieved the best loss.
+         _best_loss (float): Best loss achieved during optimization.
+         _curr_circuits (list[CircuitBundle]): Most recently generated quantum circuits.
+         _total_circuit_count (int): Total number of circuits executed.
+         _total_run_time (float): Total execution time in seconds.
+         _curr_params (npt.NDArray[np.float64]): Current parameter values.
+         _seed (int | None): Random seed for parameter initialization.
+         _rng (np.random.Generator): Random number generator.
+         _grad_mode (bool): Whether gradients are currently being computed.
+         _grouping_strategy (str): Strategy for grouping quantum operations.
+         _qem_protocol (QEMProtocol): Quantum error mitigation protocol.
+         _cancellation_event (Event | None): Event for graceful termination.
+         _meta_circuit_factory (callable): Factory for creating MetaCircuit instances.
+     """
+
+     def __init__(
+         self,
+         backend: CircuitRunner,
+         optimizer: Optimizer | None = None,
+         seed: int | None = None,
+         progress_queue: Queue | None = None,
+         **kwargs,
+     ):
+         """Initialize the VariationalQuantumAlgorithm.
+
+         This constructor is specifically designed for hybrid quantum-classical
+         variational algorithms. The instance variables `n_layers` and `n_params`
+         must be set by subclasses, where:
+         - `n_layers` is the number of layers in the quantum circuit.
+         - `n_params` is the number of parameters per layer.
+
+         For exotic variational algorithms where these variables are not applicable,
+         override the `_initialize_params` method to set the parameters instead.
+
+         Args:
+             backend (CircuitRunner): Quantum circuit execution backend.
+             optimizer (Optimizer | None): The optimizer to use for parameter
+                 optimization. Defaults to MonteCarloOptimizer().
+             seed (int | None): Random seed for parameter initialization. Defaults to None.
+             progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+
+         Keyword Args:
+             initial_params (npt.NDArray[np.float64] | None): Initial parameters with
+                 shape (n_param_sets, n_layers * n_params). If provided, they are
+                 validated against the expected shape when `run()` is called.
+                 Defaults to None.
+             grouping_strategy (str): Strategy for grouping operations in PennyLane
+                 transforms. Options: "default", "wires", "qwc". Defaults to "qwc".
+             qem_protocol (QEMProtocol | None): Quantum error mitigation protocol to
+                 apply. Defaults to None.
+             precision (int): Number of decimal places for parameter values in QASM
+                 conversion. Defaults to 8.
+
+                 Note: Higher precision values result in longer QASM strings, which
+                 increases the amount of data sent to cloud backends. For most use
+                 cases, the default of 8 decimal places provides sufficient accuracy
+                 while keeping QASM sizes manageable. Reduce the precision if you need
+                 to minimize data-transfer overhead; increase it only if you require
+                 higher numerical precision in your circuit parameters.
+         """
+
+         super().__init__(
+             backend=backend, seed=seed, progress_queue=progress_queue, **kwargs
+         )
+
+         # --- Optimization Results & History ---
+         self._losses_history = []
+         self._best_params = []
+         self._final_params = []
+         self._best_loss = float("inf")
+         self._best_probs = {}
+         self._curr_params = kwargs.pop("initial_params", None)
+
+         # --- Random Number Generation ---
+         self._seed = seed
+         self._rng = np.random.default_rng(self._seed)
+
+         # --- Computation Mode Flags ---
+         # Lets child classes adapt their optimization step to the
+         # gradient-calculation routine
+         self._grad_mode = False
+         self._is_compute_probabilities = False
+
+         # --- Optimizer Configuration ---
+         self.optimizer = optimizer if optimizer is not None else MonteCarloOptimizer()
+
+         # --- Backend & Circuit Configuration ---
+         if backend and backend.supports_expval:
+             grouping_strategy = kwargs.pop("grouping_strategy", None)
+             if grouping_strategy is not None and grouping_strategy != "_backend_expval":
+                 warn(
+                     "Backend supports direct expectation value calculation, but a "
+                     "grouping_strategy was provided. The grouping strategy will be ignored.",
+                     UserWarning,
+                 )
+             self._grouping_strategy = "_backend_expval"
+         else:
+             self._grouping_strategy = kwargs.pop("grouping_strategy", "qwc")
+
+         self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
+         self._precision = kwargs.pop("precision", 8)
+
+         # --- Circuit Factory & Templates ---
+         self._meta_circuits = None
+         self._meta_circuit_factory = partial(
+             MetaCircuit,
+             grouping_strategy=self._grouping_strategy,
+             qem_protocol=self._qem_protocol,
+             precision=self._precision,
+         )
+
+         # --- Control Flow ---
+         self._cancellation_event = None
+
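A typical construction looks like the sketch below; `MyVQA` is a hypothetical subclass and `ParallelSimulator` stands in for whichever `CircuitRunner` backend you use (the exact backend name is an assumption here, not documented API):

    program = MyVQA(
        backend=ParallelSimulator(),       # any CircuitRunner implementation
        optimizer=MonteCarloOptimizer(),   # the default if omitted
        seed=42,
        grouping_strategy="qwc",           # ignored when backend.supports_expval
        precision=8,                       # decimal places in emitted QASM
    )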
+     @property
+     @abstractmethod
+     def cost_hamiltonian(self) -> qml.operation.Operator:
+         """The cost Hamiltonian for the variational problem."""
+         pass
+
+     @property
+     def total_circuit_count(self) -> int:
+         """Get the total number of circuits executed.
+
+         Returns:
+             int: Cumulative count of circuits submitted for execution.
+         """
+         return self._total_circuit_count
+
+     @property
+     def total_run_time(self) -> float:
+         """Get the total runtime across all circuit executions.
+
+         Returns:
+             float: Cumulative execution time in seconds.
+         """
+         return self._total_run_time
+
+     @property
+     def n_params(self):
+         """Get the number of parameters per layer of the quantum circuit.
+
+         Returns:
+             int: Number of trainable parameters per layer.
+         """
+         return self._n_params
+
+     def _has_run_optimization(self) -> bool:
+         """Check if optimization has been run at least once.
+
+         Returns:
+             bool: True if optimization has been run, False otherwise.
+         """
+         return len(self._losses_history) > 0
+
+     @property
+     def losses_history(self) -> list[dict]:
+         """Get a copy of the optimization loss history.
+
+         Each entry is a dictionary mapping parameter-set indices to loss values.
+
+         Returns:
+             list[dict]: Copy of the loss history. Modifications to this list
+                 will not affect the internal state.
+         """
+         if not self._has_run_optimization():
+             warn(
+                 "losses_history is empty. Optimization has not been run yet. "
+                 "Call run() to execute the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return self._losses_history.copy()
+
+     @property
+     def min_losses_per_iteration(self) -> list[float]:
+         """Get the minimum loss value for each iteration.
+
+         Returns a list where each element is the minimum (best) loss value
+         across all parameter sets for that iteration.
+
+         Returns:
+             list[float]: List of minimum loss values, one per iteration.
+         """
+         if not self._has_run_optimization():
+             warn(
+                 "min_losses_per_iteration is empty. Optimization has not been run yet. "
+                 "Call run() to execute the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return [min(loss_dict.values()) for loss_dict in self._losses_history]
+
+     @property
+     def final_params(self) -> npt.NDArray[np.float64]:
+         """Get a copy of the final optimized parameters.
+
+         Returns:
+             npt.NDArray[np.float64]: Copy of the final parameters. Modifications
+                 to this array will not affect the internal state.
+         """
+         if len(self._final_params) == 0 or not self._has_run_optimization():
+             warn(
+                 "final_params is not available. Optimization has not been run yet. "
+                 "Call run() to execute the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return self._final_params.copy()
+
+     @property
+     def best_params(self) -> npt.NDArray[np.float64]:
+         """Get a copy of the parameters that achieved the best (lowest) loss.
+
+         Returns:
+             npt.NDArray[np.float64]: Copy of the best parameters. Modifications
+                 to this array will not affect the internal state.
+         """
+         if len(self._best_params) == 0 or not self._has_run_optimization():
+             warn(
+                 "best_params is not available. Optimization has not been run yet. "
+                 "Call run() to execute the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return self._best_params.copy()
+
+     @property
+     def best_loss(self) -> float:
+         """Get the best loss achieved so far.
+
+         Returns:
+             float: The best loss achieved so far.
+         """
+         if not self._has_run_optimization():
+             warn(
+                 "best_loss has not been computed yet. Optimization has not been run. "
+                 "Call run() to execute the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         elif self._best_loss == float("inf"):
+             # Defensive check: if optimization ran but best_loss is still inf,
+             # something is wrong
+             raise RuntimeError(
+                 "best_loss is still infinite after optimization. This indicates a "
+                 "problem with the optimization process. The optimization callback may "
+                 "not have executed correctly, or all computed losses were infinite."
+             )
+         return self._best_loss
+
+     @property
+     def best_probs(self):
+         """Get a copy of the probability distribution for the best parameters.
+
+         Returns:
+             dict: A copy of the best probability distribution.
+         """
+         if not self._best_probs:
+             warn(
+                 "best_probs is empty. Either optimization has not been run yet, "
+                 "or final computation was not performed. Call run() to execute "
+                 "the optimization.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return self._best_probs.copy()
+
+     @property
+     def curr_params(self) -> npt.NDArray[np.float64]:
+         """Get the current parameters.
+
+         These are the parameters used for optimization. They can be accessed
+         and modified at any time, including during optimization.
+
+         Returns:
+             npt.NDArray[np.float64]: Current parameters. If not yet initialized,
+                 they will be generated automatically.
+         """
+         if self._curr_params is None:
+             self._initialize_params()
+         return self._curr_params.copy()
+
+     @curr_params.setter
+     def curr_params(self, value: npt.NDArray[np.float64] | None):
+         """Set the current parameters.
+
+         Args:
+             value (npt.NDArray[np.float64] | None): Parameters with shape
+                 (n_param_sets, n_layers * n_params), or None to reset to the
+                 uninitialized state.
+
+         Raises:
+             ValueError: If parameters have an incorrect shape.
+         """
+         if value is not None:
+             self._validate_initial_params(value)
+             self._curr_params = value.copy()
+         else:
+             # Reset to uninitialized state
+             self._curr_params = None
+
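The shape contract enforced by the setter, sketched with hypothetical sizes (two layers, three parameters per layer, an optimizer tracking four parameter sets):

    shape = program.get_expected_param_shape()               # -> (4, 6)
    rng = np.random.default_rng(0)
    program.curr_params = rng.uniform(0, 2 * np.pi, shape)   # validated, copied
    program.curr_params = None  # reset; re-initialized lazily on next access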
+     # --- Serialization Adapters (For Pydantic) ---
+     @property
+     def _serialized_program_type(self) -> str:
+         return type(self).__name__
+
+     @property
+     def _serialized_rng_state(self) -> bytes:
+         return pickle.dumps(self._rng.bit_generator.state)
+
+     @property
+     def _serialized_optimizer_config(self) -> OptimizerConfig:
+         config_dict = self.optimizer.get_config()
+         return OptimizerConfig(type=config_dict.pop("type"), config=config_dict)
+
+     @property
+     def _serialized_subclass_state(self) -> SubclassState:
+         return SubclassState(data=self._save_subclass_state())
+
+     @property
+     def meta_circuits(self) -> dict[str, MetaCircuit]:
+         """Get the meta-circuit templates used by this program.
+
+         Returns:
+             dict[str, MetaCircuit]: Dictionary mapping circuit names to their
+                 MetaCircuit templates.
+         """
+         # Lazy initialization: each instance has its own _meta_circuits.
+         # Note: when used with ProgramBatch, meta_circuits is initialized
+         # sequentially in the main thread before parallel execution to avoid
+         # thread-safety issues.
+         if self._meta_circuits is None:
+             self._meta_circuits = self._create_meta_circuits_dict()
+         return self._meta_circuits
+
+     @abstractmethod
+     def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
+         pass
+
+     @abstractmethod
+     def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
+         """Generate quantum circuits for execution.
+
+         This method should generate and return a list of CircuitBundle objects
+         based on the current algorithm state and parameters. The circuits will
+         be executed by the backend.
+
+         Args:
+             **kwargs: Additional keyword arguments for circuit generation.
+
+         Returns:
+             list[CircuitBundle]: List of CircuitBundle objects to be executed.
+         """
+         pass
+
+     @abstractmethod
+     def _save_subclass_state(self) -> dict[str, Any]:
+         """Hook method for subclasses to save additional state.
+
+         Subclasses must override this method to return a dictionary of
+         state variables that should be included in the checkpoint.
+
+         Returns:
+             dict[str, Any]: Dictionary of subclass-specific state.
+         """
+         pass
+
+     @abstractmethod
+     def _load_subclass_state(self, state: dict[str, Any]) -> None:
+         """Hook method for subclasses to load additional state.
+
+         Subclasses must override this method to restore state variables
+         from the checkpoint dictionary. This is called after instance creation.
+
+         Args:
+             state (dict[str, Any]): Dictionary of subclass-specific state.
+         """
+         pass
+
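A bare-bones subclass wiring into the checkpoint hooks might look like this sketch (the attribute names are illustrative, and the other abstract methods are omitted):

    class MyVQA(VariationalQuantumAlgorithm):
        def _save_subclass_state(self) -> dict[str, Any]:
            # Everything returned here must be JSON-serializable.
            return {"n_layers": self.n_layers, "loss_constant": self.loss_constant}

        def _load_subclass_state(self, state: dict[str, Any]) -> None:
            self.n_layers = state["n_layers"]
            self.loss_constant = state["loss_constant"]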
+     def _get_optimizer_config(self) -> OptimizerConfig:
+         """Extract optimizer configuration for checkpoint reconstruction.
+
+         Returns:
+             OptimizerConfig: Configuration object for the current optimizer.
+
+         Raises:
+             NotImplementedError: If the optimizer does not support state saving.
+         """
+         config_dict = self.optimizer.get_config()
+         return OptimizerConfig(
+             type=config_dict.pop("type"),
+             config=config_dict,
+         )
+
596
+ def save_state(self, checkpoint_config: CheckpointConfig) -> str:
597
+ """Save the program state to a checkpoint directory."""
598
+ if self.current_iteration == 0 and len(self._losses_history) == 0:
599
+ raise RuntimeError("Cannot save checkpoint: optimization has not been run.")
600
+
601
+ if checkpoint_config.checkpoint_dir is None:
602
+ raise ValueError(
603
+ "checkpoint_config.checkpoint_dir must be a non-None Path."
604
+ )
605
+
606
+ main_dir = _ensure_checkpoint_dir(checkpoint_config.checkpoint_dir)
607
+ checkpoint_path = _get_checkpoint_subdir_path(main_dir, self.current_iteration)
608
+ checkpoint_path.mkdir(parents=True, exist_ok=True)
609
+
610
+ # 1. Save optimizer
611
+ self.optimizer.save_state(checkpoint_path)
612
+
613
+ # 2. Save Program State (Pydantic pulls data via validation_aliases)
614
+ state = ProgramState.model_validate(self)
615
+
616
+ state_file = checkpoint_path / PROGRAM_STATE_FILE
617
+ _atomic_write(state_file, state.model_dump_json(indent=2))
618
+
619
+ return checkpoint_path
620
+
621
+ @classmethod
622
+ def load_state(
623
+ cls,
624
+ checkpoint_dir: Path | str,
625
+ backend: CircuitRunner,
626
+ subdirectory: str | None = None,
627
+ **kwargs,
628
+ ) -> "VariationalQuantumAlgorithm":
629
+ """Load program state from a checkpoint directory."""
630
+ checkpoint_path = resolve_checkpoint_path(checkpoint_dir, subdirectory)
631
+ state_file = checkpoint_path / PROGRAM_STATE_FILE
632
+
633
+ # 1. Load Pydantic Model
634
+ state = _load_and_validate_pydantic_model(
635
+ state_file,
636
+ ProgramState,
637
+ required_fields=["program_type", "current_iteration"],
638
+ )
639
+
640
+ # 2. Reconstruct Optimizer
641
+ opt_config = state.optimizer_config
642
+ if opt_config.type == "MonteCarloOptimizer":
643
+ optimizer = MonteCarloOptimizer.load_state(checkpoint_path)
644
+ elif opt_config.type == "PymooOptimizer":
645
+ optimizer = PymooOptimizer.load_state(checkpoint_path)
646
+ else:
647
+ raise ValueError(f"Unsupported optimizer type: {opt_config.type}")
648
+
649
+ # 3. Create Instance
650
+ program = cls(backend=backend, optimizer=optimizer, seed=state.seed, **kwargs)
651
+
652
+ # 4. Restore State
653
+ state.restore(program)
654
+
655
+ return program
656
+
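A save/load round trip, assuming the hypothetical `MyVQA` subclass from above; note that backends are not serialized, so a live one must be supplied on load:

    cfg = CheckpointConfig(checkpoint_dir=Path("./checkpoints"))
    program.run(checkpoint_config=cfg)   # may checkpoint periodically
    path = program.save_state(cfg)       # explicit save of the current state

    restored = MyVQA.load_state(
        checkpoint_dir="./checkpoints",
        backend=backend,                 # fresh CircuitRunner instance
    )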
+     def get_expected_param_shape(self) -> tuple[int, int]:
+         """Get the expected shape for initial parameters.
+
+         Returns:
+             tuple[int, int]: Shape (n_param_sets, n_layers * n_params) that
+                 initial parameters should have for this quantum program.
+         """
+         return (self.optimizer.n_param_sets, self.n_layers * self.n_params)
+
+     def _validate_initial_params(self, params: npt.NDArray[np.float64]):
+         """Validate user-provided initial parameters.
+
+         Args:
+             params (npt.NDArray[np.float64]): Parameters to validate.
+
+         Raises:
+             ValueError: If parameters have an incorrect shape.
+         """
+         expected_shape = self.get_expected_param_shape()
+
+         if params.shape != expected_shape:
+             raise ValueError(
+                 f"Initial parameters must have shape {expected_shape}, "
+                 f"got {params.shape}"
+             )
+
+     def _initialize_params(self):
+         """Initialize the circuit parameters randomly.
+
+         Generates random parameters uniformly distributed between 0 and 2π.
+         The number of parameter sets depends on the optimizer being used.
+         """
+         total_params = self.n_layers * self.n_params
+         self._curr_params = self._rng.uniform(
+             0, 2 * np.pi, (self.optimizer.n_param_sets, total_params)
+         )
+
+     def _run_optimization_circuits(self, **kwargs) -> dict[int, float]:
+         self._curr_circuits = self._generate_circuits(**kwargs)
+
+         if self.backend.supports_expval:
+             kwargs["ham_ops"] = convert_hamiltonian_to_pauli_string(
+                 self.cost_hamiltonian, self.n_qubits
+             )
+
+         losses = self._dispatch_circuits_and_process_results(**kwargs)
+
+         return losses
+
+     def _post_process_results(
+         self, results: dict[str, dict[str, int]], **kwargs
+     ) -> dict[int, float]:
+         """Post-process the results of the quantum problem.
+
+         Args:
+             results (dict[str, dict[str, int]]): The shot histograms of the quantum
+                 execution step. The keys are strings of the format
+                 "{param_id}_*_{measurement_group_id}", i.e. underscore-separated
+                 metadata that always starts with a parameter-set index and ends
+                 with a measurement-group index. Any extra metadata relevant to
+                 the specific application can be kept in the middle.
+
+         Returns:
+             dict[int, float]: The energies for each parameter set, keyed by
+                 parameter-set index.
+         """
+         if self._is_compute_probabilities:
+             probs = convert_counts_to_probs(results, self.backend.shots)
+             return reverse_dict_endianness(probs)
+
+         if not (self._cancellation_event and self._cancellation_event.is_set()):
+             self.reporter.info(
+                 message="Post-processing output", iteration=self.current_iteration
+             )
+
+         losses = {}
+         measurement_groups = self.meta_circuits["cost_circuit"].measurement_groups
+
+         # Define key functions for grouping
+         get_param_id = lambda item: int(item[0].split("_")[0])
+         get_qem_id = lambda item: int(item[0].split("_")[1].split(":")[1])
+
+         # Group the pre-sorted results by parameter ID.
+         for p, param_group_iterator in groupby(results.items(), key=get_param_id):
+             param_group_iterator = list(param_group_iterator)
+
+             # Group by QEM ID to handle error mitigation
+             qem_groups = {
+                 gid: [value for _, value in group]
+                 for gid, group in groupby(param_group_iterator, key=get_qem_id)
+             }
+
+             # Apply QEM protocol to expectation values (common for both backends)
+             apply_qem = lambda exp_matrix: [
+                 self._qem_protocol.postprocess_results(exp_vals)
+                 for exp_vals in exp_matrix
+             ]
+
+             if self.backend.supports_expval:
+                 ham_ops = kwargs.get("ham_ops")
+                 if ham_ops is None:
+                     raise ValueError(
+                         "Hamiltonian operators (ham_ops) are required when using a backend "
+                         "that supports expectation values, but were not provided."
+                     )
+                 # Iterate QEM groups in key order; the grouped lists themselves
+                 # are not orderable, so sort by group ID.
+                 marginal_results = [
+                     apply_qem(
+                         np.array(
+                             [
+                                 [shot_dict[op] for op in ham_ops.split(";")]
+                                 for shot_dict in qem_groups[gid]
+                             ]
+                         ).T
+                     )
+                     for gid in sorted(qem_groups)
+                 ]
+             else:
+                 shots_by_qem_idx = zip(*qem_groups.values())
+                 marginal_results = []
+                 for shots_dicts, curr_measurement_group in zip(
+                     shots_by_qem_idx, measurement_groups
+                 ):
+                     wire_order = tuple(reversed(self.cost_hamiltonian.wires))
+                     exp_matrix = _batched_expectation(
+                         shots_dicts, curr_measurement_group, wire_order
+                     )
+                     mitigated = apply_qem(exp_matrix)
+                     marginal_results.append(
+                         mitigated if len(mitigated) > 1 else mitigated[0]
+                     )
+
+             pl_loss = (
+                 self.meta_circuits["cost_circuit"]
+                 .postprocessing_fn(marginal_results)
+                 .item()
+             )
+
+             losses[p] = pl_loss + self.loss_constant
+
+         return losses
+
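The grouping above relies on the key format and on the results arriving pre-sorted, since `itertools.groupby` only merges adjacent items. A small sketch with hypothetical keys shaped to satisfy `get_param_id` and `get_qem_id`:

    from itertools import groupby

    results = {  # "{param_id}_qem:{qem_id}_{group_id}" (illustrative format)
        "0_qem:0_0": {"00": 480, "11": 520},
        "0_qem:0_1": {"00": 510, "11": 490},
        "1_qem:0_0": {"00": 300, "11": 700},
    }
    get_param_id = lambda item: int(item[0].split("_")[0])

    for p, group in groupby(results.items(), key=get_param_id):
        print(p, [key for key, _ in group])
    # 0 ['0_qem:0_0', '0_qem:0_1']
    # 1 ['1_qem:0_0']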
+     def _perform_final_computation(self, **kwargs) -> None:
+         """Perform final computations after optimization is complete.
+
+         This is an optional hook method that subclasses can override to perform
+         any post-optimization processing, such as extracting solutions, running
+         final measurements, or computing additional metrics.
+
+         Args:
+             **kwargs: Additional keyword arguments for subclasses.
+
+         Note:
+             The default implementation does nothing. Subclasses should override
+             this method if they need post-optimization processing.
+         """
+         pass
+
+     def run(
+         self,
+         perform_final_computation: bool = True,
+         checkpoint_config: CheckpointConfig | None = None,
+         **kwargs,
+     ) -> tuple[int, float]:
+         """Run the variational quantum algorithm.
+
+         The outputs are stored in the algorithm object.
+
+         Args:
+             perform_final_computation (bool): Whether to perform final computation
+                 after optimization completes. Typically, this step involves sampling
+                 with the best found parameters to extract solution probability
+                 distributions. Set this to False in warm-starting or pre-training
+                 routines where the final sampling step is not needed. Defaults to True.
+             checkpoint_config (CheckpointConfig | None): Checkpoint configuration.
+                 If None, no checkpointing is performed.
+             **kwargs: Additional keyword arguments for subclasses.
+
+         Returns:
+             tuple[int, float]: A tuple containing (total_circuit_count, total_run_time).
+         """
+         # Initialize checkpointing
+         if checkpoint_config is None:
+             checkpoint_config = CheckpointConfig()
+
+         if checkpoint_config.checkpoint_dir:
+             logger.info(
+                 f"Using checkpoint directory: {checkpoint_config.checkpoint_dir}"
+             )
+
+         # Extract max_iterations from kwargs if present (for compatibility with subclasses)
+         max_iterations = kwargs.pop("max_iterations", self.max_iterations)
+         if max_iterations != self.max_iterations:
+             self.max_iterations = max_iterations
+
+         # Warn if max_iterations is less than current_iteration (regardless of how it was set)
+         if self.max_iterations < self.current_iteration:
+             warn(
+                 f"max_iterations ({self.max_iterations}) is less than current_iteration "
+                 f"({self.current_iteration}). The optimization will not run additional "
+                 f"iterations since the maximum has already been reached.",
+                 UserWarning,
+             )
+
+         def cost_fn(params):
+             self.reporter.info(
+                 message="💸 Computing Cost 💸", iteration=self.current_iteration
+             )
+
+             self._curr_params = np.atleast_2d(params)
+
+             losses = self._run_optimization_circuits(**kwargs)
+
+             losses = np.fromiter(losses.values(), dtype=np.float64)
+
+             if params.ndim > 1:
+                 return losses
+             else:
+                 return losses.item()
+
+         self._grad_shift_mask = _compute_parameter_shift_mask(
+             self.n_layers * self.n_params
+         )
+
+         def grad_fn(params):
+             self._grad_mode = True
+
+             self.reporter.info(
+                 message="📈 Computing Gradients 📈", iteration=self.current_iteration
+             )
+
+             self._curr_params = self._grad_shift_mask + params
+
+             exp_vals = self._run_optimization_circuits(**kwargs)
+             exp_vals_arr = np.fromiter(exp_vals.values(), dtype=np.float64)
+
+             pos_shifts = exp_vals_arr[::2]
+             neg_shifts = exp_vals_arr[1::2]
+             grads = 0.5 * (pos_shifts - neg_shifts)
+
+             self._grad_mode = False
+
+             return grads
+
+         def _iteration_counter(intermediate_result: OptimizeResult):
+             self._losses_history.append(
+                 dict(
+                     zip(
+                         [str(i) for i in range(len(intermediate_result.x))],
+                         intermediate_result.fun,
+                     )
+                 )
+             )
+
+             current_loss = np.min(intermediate_result.fun)
+             if current_loss < self._best_loss:
+                 self._best_loss = current_loss
+                 best_idx = np.argmin(intermediate_result.fun)
+                 self._best_params = intermediate_result.x[best_idx].copy()
+
+             self.current_iteration += 1
+
+             self.reporter.update(iteration=self.current_iteration)
+
+             # Checkpointing
+             if checkpoint_config._should_checkpoint(self.current_iteration):
+                 self.save_state(checkpoint_config)
+
+             if self._cancellation_event and self._cancellation_event.is_set():
+                 raise _CancelledError("Cancellation requested by batch.")
+
+             # The scipy implementation of COBYLA interprets the `maxiter` option
+             # as the maximum number of function evaluations, not iterations.
+             # To provide a consistent user experience, we disable scipy's
+             # `maxiter` and manually stop the optimization from the callback
+             # when the desired number of iterations is reached.
+             if (
+                 isinstance(self.optimizer, ScipyOptimizer)
+                 and self.optimizer.method == ScipyMethod.COBYLA
+                 and intermediate_result.nit + 1 == self.max_iterations
+             ):
+                 raise StopIteration
+
+         self.reporter.info(message="Finished Setup")
+
+         if self._curr_params is None:
+             self._initialize_params()
+         else:
+             self._validate_initial_params(self._curr_params)
+
+         try:
+             self._minimize_res = self.optimizer.optimize(
+                 cost_fn=cost_fn,
+                 initial_params=self._curr_params,
+                 callback_fn=_iteration_counter,
+                 jac=grad_fn,
+                 max_iterations=self.max_iterations,
+                 rng=self._rng,
+             )
+         except _CancelledError:
+             # The optimizer was stopped by our callback. This is not a real
+             # error, just a signal to exit this task cleanly.
+             return self._total_circuit_count, self._total_run_time
+
+         self._final_params = self._minimize_res.x
+
+         if perform_final_computation:
+             self._perform_final_computation(**kwargs)
+
+         self.reporter.info(message="Finished successfully!")
+
+         return self.total_circuit_count, self.total_run_time
+
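End to end, a run with periodic checkpointing then looks like this sketch (again with the hypothetical `MyVQA`):

    program = MyVQA(backend=backend, optimizer=MonteCarloOptimizer(), seed=7)
    circuit_count, run_time = program.run(
        checkpoint_config=CheckpointConfig(checkpoint_dir=Path("./checkpoints")),
    )
    print(program.best_loss, program.min_losses_per_iteration[-1])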
+     def _run_solution_measurement(self) -> None:
+         """Execute measurement circuits to obtain probability distributions for solution extraction."""
+         if self._best_params is None or len(self._best_params) == 0:
+             raise RuntimeError(
+                 "Optimization has not been run, no best parameters available."
+             )
+
+         if "meas_circuit" not in self.meta_circuits:
+             raise NotImplementedError(
+                 f"{type(self).__name__} does not implement a 'meas_circuit'."
+             )
+
+         self._is_compute_probabilities = True
+
+         # Compute probabilities for the best parameters (the ones that achieved the best loss)
+         self._curr_params = np.atleast_2d(self._best_params)
+         self._curr_circuits = self._generate_circuits()
+         best_probs = self._dispatch_circuits_and_process_results()
+         self._best_probs.update(best_probs)
+
+         self._is_compute_probabilities = False