qoro-divi 0.2.0b1__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- divi/__init__.py +1 -2
- divi/backends/__init__.py +10 -0
- divi/backends/_backend_properties_conversion.py +227 -0
- divi/backends/_circuit_runner.py +70 -0
- divi/backends/_execution_result.py +70 -0
- divi/backends/_parallel_simulator.py +486 -0
- divi/backends/_qoro_service.py +663 -0
- divi/backends/_qpu_system.py +101 -0
- divi/backends/_results_processing.py +133 -0
- divi/circuits/__init__.py +13 -0
- divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
- divi/circuits/_cirq/_parser.py +110 -0
- divi/circuits/_cirq/_qasm_export.py +78 -0
- divi/circuits/_core.py +391 -0
- divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
- divi/circuits/_qasm_validation.py +694 -0
- divi/qprog/__init__.py +27 -8
- divi/qprog/_expectation.py +181 -0
- divi/qprog/_hamiltonians.py +281 -0
- divi/qprog/algorithms/__init__.py +16 -0
- divi/qprog/algorithms/_ansatze.py +368 -0
- divi/qprog/algorithms/_custom_vqa.py +263 -0
- divi/qprog/algorithms/_pce.py +262 -0
- divi/qprog/algorithms/_qaoa.py +579 -0
- divi/qprog/algorithms/_vqe.py +262 -0
- divi/qprog/batch.py +387 -74
- divi/qprog/checkpointing.py +556 -0
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +1014 -43
- divi/qprog/quantum_program.py +243 -412
- divi/qprog/typing.py +62 -0
- divi/qprog/variational_quantum_algorithm.py +1208 -0
- divi/qprog/workflows/__init__.py +10 -0
- divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
- divi/qprog/workflows/_qubo_partitioning.py +221 -0
- divi/qprog/workflows/_vqe_sweep.py +560 -0
- divi/reporting/__init__.py +7 -0
- divi/reporting/_pbar.py +127 -0
- divi/reporting/_qlogger.py +68 -0
- divi/reporting/_reporter.py +155 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/METADATA +43 -15
- qoro_divi-0.6.0.dist-info/RECORD +47 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/WHEEL +1 -1
- qoro_divi-0.6.0.dist-info/licenses/LICENSES/.license-header +3 -0
- divi/_pbar.py +0 -73
- divi/circuits.py +0 -139
- divi/exp/cirq/_lexer.py +0 -126
- divi/exp/cirq/_parser.py +0 -889
- divi/exp/cirq/_qasm_export.py +0 -37
- divi/exp/cirq/_qasm_import.py +0 -35
- divi/exp/cirq/exception.py +0 -21
- divi/exp/scipy/_cobyla.py +0 -342
- divi/exp/scipy/pyprima/LICENCE.txt +0 -28
- divi/exp/scipy/pyprima/__init__.py +0 -263
- divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
- divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
- divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
- divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
- divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
- divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
- divi/exp/scipy/pyprima/cobyla/update.py +0 -331
- divi/exp/scipy/pyprima/common/__init__.py +0 -0
- divi/exp/scipy/pyprima/common/_bounds.py +0 -41
- divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
- divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
- divi/exp/scipy/pyprima/common/_project.py +0 -224
- divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
- divi/exp/scipy/pyprima/common/consts.py +0 -48
- divi/exp/scipy/pyprima/common/evaluate.py +0 -101
- divi/exp/scipy/pyprima/common/history.py +0 -39
- divi/exp/scipy/pyprima/common/infos.py +0 -30
- divi/exp/scipy/pyprima/common/linalg.py +0 -452
- divi/exp/scipy/pyprima/common/message.py +0 -336
- divi/exp/scipy/pyprima/common/powalg.py +0 -131
- divi/exp/scipy/pyprima/common/preproc.py +0 -393
- divi/exp/scipy/pyprima/common/present.py +0 -5
- divi/exp/scipy/pyprima/common/ratio.py +0 -56
- divi/exp/scipy/pyprima/common/redrho.py +0 -49
- divi/exp/scipy/pyprima/common/selectx.py +0 -346
- divi/interfaces.py +0 -25
- divi/parallel_simulator.py +0 -258
- divi/qlogger.py +0 -119
- divi/qoro_service.py +0 -343
- divi/qprog/_mlae.py +0 -182
- divi/qprog/_qaoa.py +0 -440
- divi/qprog/_vqe.py +0 -275
- divi/qprog/_vqe_sweep.py +0 -144
- divi/utils.py +0 -116
- qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
- /divi/{qem.py → circuits/qem.py} +0 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSE +0 -0
- {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
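The core of this release is the split of the old flat `divi.*` modules into dedicated subpackages: execution backends move to `divi.backends`, circuit handling to `divi.circuits`, and the algorithms into `divi.qprog.algorithms` and `divi.qprog.workflows`, with the new `variational_quantum_algorithm.py` base class shown in full below. As a rough orientation, a minimal sketch of the updated import surface (paths are taken from the diff below; the `QAOA` name is an assumption about what `divi.qprog` re-exports from its new `algorithms` subpackage):

    from divi.backends import CircuitRunner
    from divi.qprog import QAOA  # assumed re-export from divi.qprog.algorithms
    from divi.qprog.checkpointing import CheckpointConfig
    from divi.qprog.optimizers import MonteCarloOptimizer, ScipyMethod, ScipyOptimizer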
divi/qprog/variational_quantum_algorithm.py
@@ -0,0 +1,1208 @@
+# SPDX-FileCopyrightText: 2025-2026 Qoro Quantum Ltd <divi@qoroquantum.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+import pickle
+from abc import abstractmethod
+from datetime import datetime
+from functools import partial
+from pathlib import Path
+from queue import Queue
+from typing import Any, NamedTuple
+from warnings import warn
+
+import numpy as np
+import numpy.typing as npt
+import pennylane as qml
+from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator
+from scipy.optimize import OptimizeResult
+
+from divi.backends import (
+    CircuitRunner,
+    convert_counts_to_probs,
+    reverse_dict_endianness,
+)
+from divi.circuits import CircuitBundle, CircuitTag, MetaCircuit, format_circuit_tag
+from divi.circuits.qem import _NoMitigation
+from divi.qprog._expectation import _batched_expectation
+from divi.qprog._hamiltonians import convert_hamiltonian_to_pauli_string
+from divi.qprog.checkpointing import (
+    PROGRAM_STATE_FILE,
+    CheckpointConfig,
+    _atomic_write,
+    _ensure_checkpoint_dir,
+    _get_checkpoint_subdir_path,
+    _load_and_validate_pydantic_model,
+    resolve_checkpoint_path,
+)
+from divi.qprog.exceptions import _CancelledError
+from divi.qprog.optimizers import (
+    MonteCarloOptimizer,
+    Optimizer,
+    PymooOptimizer,
+    ScipyMethod,
+    ScipyOptimizer,
+)
+from divi.qprog.quantum_program import QuantumProgram
+
+logger = logging.getLogger(__name__)
+
+
+class SolutionEntry(NamedTuple):
+    """A solution entry with bitstring, probability, and optional decoded value.
+
+    Args:
+        bitstring: Binary string representing a computational basis state.
+        prob: Measured probability in range [0.0, 1.0].
+        decoded: Optional problem-specific decoded representation. Defaults to None.
+    """
+
+    bitstring: str
+    prob: float
+    decoded: Any | None = None
+
+
+class SubclassState(BaseModel):
+    """Container for subclass-specific state."""
+
+    data: dict[str, Any] = Field(default_factory=dict)
+
+
+class OptimizerConfig(BaseModel):
+    """Configuration for reconstructing an optimizer."""
+
+    type: str
+    config: dict[str, Any] = Field(default_factory=dict)
+
+
+class ProgramState(BaseModel):
+    """Pydantic model for VariationalQuantumAlgorithm state."""
+
+    model_config = ConfigDict(from_attributes=True, populate_by_name=True)
+
+    # Metadata
+    program_type: str = Field(validation_alias="_serialized_program_type")
+    version: str = "1.0"
+    timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())
+
+    # Core Algorithm State (mapped to private attributes)
+    current_iteration: int
+    max_iterations: int
+    losses_history: list[dict[str, float]] = Field(validation_alias="_losses_history")
+    best_loss: float = Field(validation_alias="_best_loss")
+    best_probs: dict[str, float] = Field(validation_alias="_best_probs")
+    total_circuit_count: int = Field(validation_alias="_total_circuit_count")
+    total_run_time: float = Field(validation_alias="_total_run_time")
+    seed: int | None = Field(validation_alias="_seed")
+    grouping_strategy: str = Field(validation_alias="_grouping_strategy")
+
+    # Arrays
+    curr_params: list[list[float]] | None = Field(
+        default=None, validation_alias="_curr_params"
+    )
+    best_params: list[float] | None = Field(
+        default=None, validation_alias="_best_params"
+    )
+    final_params: list[float] | None = Field(
+        default=None, validation_alias="_final_params"
+    )
+
+    # Complex State (mapped to new adapter properties)
+    rng_state_bytes: bytes | None = Field(
+        default=None, validation_alias="_serialized_rng_state"
+    )
+    optimizer_config: OptimizerConfig = Field(
+        validation_alias="_serialized_optimizer_config"
+    )
+    subclass_state: SubclassState = Field(validation_alias="_serialized_subclass_state")
+
+    @field_serializer("rng_state_bytes")
+    def serialize_bytes(self, v: bytes | None, _info):
+        return v.hex() if v is not None else None
+
+    @field_validator("rng_state_bytes", mode="before")
+    @classmethod
+    def validate_bytes(cls, v):
+        return bytes.fromhex(v) if isinstance(v, str) else v
+
+    @field_serializer("curr_params", "best_params", "final_params")
+    def serialize_arrays(self, v: npt.NDArray | list | None, _info):
+        if isinstance(v, np.ndarray):
+            return v.tolist()
+        return v
+
+    def restore(self, program: "VariationalQuantumAlgorithm") -> None:
+        """Apply this state object back to a program instance."""
+        # 1. Bulk restore standard attributes
+        for name, field in self.model_fields.items():
+            target_attr = field.validation_alias or name
+
+            # Skip adapter properties (they are read-only / calculated)
+            if target_attr.startswith("_serialized_"):
+                continue
+
+            val = getattr(self, name)
+
+            # Handle numpy conversion
+            if "params" in target_attr and val is not None:
+                val = np.array(val)
+
+            if hasattr(program, target_attr):
+                setattr(program, target_attr, val)
+
+        # 2. Restore complex state
+        if self.rng_state_bytes:
+            program._rng.bit_generator.state = pickle.loads(self.rng_state_bytes)
+
+        program._load_subclass_state(self.subclass_state.data)
+
+
+def _compute_parameter_shift_mask(n_params: int) -> npt.NDArray[np.float64]:
+    """
+    Generate a binary matrix mask for the parameter shift rule.
+    This mask is used to determine the shifts to apply to each parameter
+    when computing gradients via the parameter shift rule in quantum algorithms.
+
+    Args:
+        n_params (int): The number of parameters in the quantum circuit.
+
+    Returns:
+        npt.NDArray[np.float64]: A (2 * n_params, n_params) matrix where each row encodes
+            the shift to apply to each parameter for a single evaluation.
+            The values are multiples of 0.5 * pi, with alternating signs.
+    """
+    mask_arr = np.arange(0, 2 * n_params, 2)
+    mask_arr[0] = 1
+
+    binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
+        np.float64
+    )
+
+    binary_matrix = binary_matrix.repeat(2, axis=0)
+    binary_matrix[1::2] *= -1
+    binary_matrix *= 0.5 * np.pi
+
+    return binary_matrix
+
+
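For `n_params = 2` the helper above yields one ±π/2 shift pair per parameter; `grad_fn` inside `run()` (further down in this diff) consumes consecutive rows as the positive and negative shifts of the parameter-shift rule, computing `0.5 * (f(θ + π/2·e_i) - f(θ - π/2·e_i))`. A doctest-style check of the mask, divided by π/2 for readability:

    >>> _compute_parameter_shift_mask(2) / (0.5 * np.pi)
    array([[ 1.,  0.],
           [-1.,  0.],
           [ 0.,  1.],
           [ 0., -1.]])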
+class VariationalQuantumAlgorithm(QuantumProgram):
+    """Base class for variational quantum algorithms.
+
+    This class provides the foundation for implementing variational quantum
+    algorithms in Divi. It handles circuit execution, parameter optimization,
+    and result management for algorithms that optimize parameterized quantum
+    circuits to minimize cost functions.
+
+    Variational algorithms work by:
+    1. Generating parameterized quantum circuits
+    2. Executing circuits on quantum hardware/simulators
+    3. Computing expectation values of cost Hamiltonians
+    4. Using classical optimizers to update parameters
+    5. Iterating until convergence
+
+    Attributes:
+        _losses_history (list[dict]): History of loss values during optimization.
+        _final_params (npt.NDArray[np.float64]): Final optimized parameters.
+        _best_params (npt.NDArray[np.float64]): Parameters that achieved the best loss.
+        _best_loss (float): Best loss achieved during optimization.
+        _circuits (list[Circuit]): Generated quantum circuits.
+        _total_circuit_count (int): Total number of circuits executed.
+        _total_run_time (float): Total execution time in seconds.
+        _curr_params (npt.NDArray[np.float64]): Current parameter values.
+        _seed (int | None): Random seed for parameter initialization.
+        _rng (np.random.Generator): Random number generator.
+        _grad_mode (bool): Whether currently computing gradients.
+        _grouping_strategy (str): Strategy for grouping quantum operations.
+        _qem_protocol (QEMProtocol): Quantum error mitigation protocol.
+        _cancellation_event (Event | None): Event for graceful termination.
+        _meta_circuit_factory (callable): Factory for creating MetaCircuit instances.
+    """
+
+    def __init__(
+        self,
+        backend: CircuitRunner,
+        optimizer: Optimizer | None = None,
+        seed: int | None = None,
+        progress_queue: Queue | None = None,
+        **kwargs,
+    ):
+        """Initialize the VariationalQuantumAlgorithm.
+
+        This constructor is specifically designed for hybrid quantum-classical
+        variational algorithms. The instance variables `n_layers` and `n_params`
+        must be set by subclasses, where:
+        - `n_layers` is the number of layers in the quantum circuit.
+        - `n_params` is the number of parameters per layer.
+
+        For exotic variational algorithms where these variables may not be applicable,
+        the `_initialize_params` method should be overridden to set the parameters.
+
+        Args:
+            backend (CircuitRunner): Quantum circuit execution backend.
+            optimizer (Optimizer | None): The optimizer to use for parameter optimization.
+                Defaults to MonteCarloOptimizer().
+            seed (int | None): Random seed for parameter initialization. Defaults to None.
+            progress_queue (Queue | None): Queue for progress reporting. Defaults to None.
+
+        Keyword Args:
+            initial_params (npt.NDArray[np.float64] | None): Initial parameters with shape
+                (n_param_sets, n_layers * n_params). If provided, these will be set as
+                the current parameters via the `curr_params` setter (which includes validation).
+                Defaults to None.
+            grouping_strategy (str): Strategy for grouping operations in Pennylane transforms.
+                Options: "default", "wires", "qwc". Defaults to "qwc".
+            qem_protocol (QEMProtocol | None): Quantum error mitigation protocol to apply. Defaults to None.
+            precision (int): Number of decimal places for parameter values in QASM conversion.
+                Defaults to 8.
+
+                Note: Higher precision values result in longer QASM strings, which increases
+                the amount of data sent to cloud backends. For most use cases, the default
+                precision of 8 decimal places provides sufficient accuracy while keeping
+                QASM sizes manageable. Consider reducing precision if you need to minimize
+                data transfer overhead, or increase it only if you require higher numerical
+                precision in your circuit parameters.
+            decode_solution_fn (callable[[str], Any] | None): Function to decode bitstrings
+                into problem-specific solution representations. Called during final computation
+                and when `get_top_solutions(include_decoded=True)` is used. The function should
+                take a binary string (e.g., "0101") and return a decoded representation
+                (e.g., a list of indices, numpy array, or custom object). Defaults to
+                `lambda bitstring: bitstring` (identity function).
+        """
+
+        super().__init__(
+            backend=backend, seed=seed, progress_queue=progress_queue, **kwargs
+        )
+
+        # --- Optimization Results & History ---
+        self._losses_history = []
+        self._best_params = []
+        self._final_params = []
+        self._best_loss = float("inf")
+        self._best_probs = {}
+        self._curr_params = kwargs.pop("initial_params", None)
+
+        # --- Random Number Generation ---
+        self._seed = seed
+        self._rng = np.random.default_rng(self._seed)
+
+        # --- Computation Mode Flags ---
+        # Lets child classes adapt their optimization step for grad calculation routine
+        self._grad_mode = False
+        self._is_compute_probabilities = False
+
+        # --- Optimizer Configuration ---
+        self.optimizer = optimizer if optimizer is not None else MonteCarloOptimizer()
+
+        # --- Backend & Circuit Configuration ---
+        if backend and backend.supports_expval:
+            grouping_strategy = kwargs.pop("grouping_strategy", None)
+            if grouping_strategy is not None and grouping_strategy != "_backend_expval":
+                warn(
+                    "Backend supports direct expectation value calculation, but a grouping_strategy was provided. "
+                    "The grouping strategy will be ignored.",
+                    UserWarning,
+                )
+            self._grouping_strategy = "_backend_expval"
+        else:
+            self._grouping_strategy = kwargs.pop("grouping_strategy", "qwc")
+
+        self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()
+        self._precision = kwargs.pop("precision", 8)
+
+        # --- Solution Decoding ---
+        self._decode_solution_fn = kwargs.pop(
+            "decode_solution_fn", lambda bitstring: bitstring
+        )
+
+        # --- Circuit Factory & Templates ---
+        self._meta_circuits = None
+        self._meta_circuit_factory = partial(
+            MetaCircuit,
+            # No grouping strategy for expectation value measurements
+            grouping_strategy=self._grouping_strategy,
+            qem_protocol=self._qem_protocol,
+            precision=self._precision,
+        )
+
+        # --- Control Flow ---
+        self._cancellation_event = None
+
+    @property
+    @abstractmethod
+    def cost_hamiltonian(self) -> qml.operation.Operator:
+        """The cost Hamiltonian for the variational problem."""
+        pass
+
+    @property
+    def total_circuit_count(self) -> int:
+        """Get the total number of circuits executed.
+
+        Returns:
+            int: Cumulative count of circuits submitted for execution.
+        """
+        return self._total_circuit_count
+
+    @property
+    def total_run_time(self) -> float:
+        """Get the total runtime across all circuit executions.
+
+        Returns:
+            float: Cumulative execution time in seconds.
+        """
+        return self._total_run_time
+
+    @property
+    def meta_circuits(self) -> dict[str, MetaCircuit]:
+        """Get the meta-circuit templates used by this program.
+
+        Returns:
+            dict[str, MetaCircuit]: Dictionary mapping circuit names to their
+                MetaCircuit templates.
+        """
+        return self._meta_circuits
+
+    @property
+    def n_params(self):
+        """Get the total number of parameters in the quantum circuit.
+
+        Returns:
+            int: Total number of trainable parameters (n_layers * n_params_per_layer).
+        """
+        return self._n_params
+
+    def _has_run_optimization(self) -> bool:
+        """Check if optimization has been run at least once.
+
+        Returns:
+            bool: True if optimization has been run, False otherwise.
+        """
+        return len(self._losses_history) > 0
+
+    @property
+    def losses_history(self) -> list[dict]:
+        """Get a copy of the optimization loss history.
+
+        Each entry is a dictionary mapping parameter indices to loss values.
+
+        Returns:
+            list[dict]: Copy of the loss history. Modifications to this list
+                will not affect the internal state.
+        """
+        if not self._has_run_optimization():
+            warn(
+                "losses_history is empty. Optimization has not been run yet. "
+                "Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return self._losses_history.copy()
+
+    @property
+    def min_losses_per_iteration(self) -> list[float]:
+        """Get the minimum loss value for each iteration.
+
+        Returns a list where each element is the minimum (best) loss value
+        across all parameter sets for that iteration.
+
+        Returns:
+            list[float]: List of minimum loss values, one per iteration.
+        """
+        if not self._has_run_optimization():
+            warn(
+                "min_losses_per_iteration is empty. Optimization has not been run yet. "
+                "Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return [min(loss_dict.values()) for loss_dict in self._losses_history]
+
+    @property
+    def final_params(self) -> npt.NDArray[np.float64]:
+        """Get a copy of the final optimized parameters.
+
+        Returns:
+            npt.NDArray[np.float64]: Copy of the final parameters. Modifications to this array
+                will not affect the internal state.
+        """
+        if len(self._final_params) == 0 or not self._has_run_optimization():
+            warn(
+                "final_params is not available. Optimization has not been run yet. "
+                "Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return self._final_params.copy()
+
+    @property
+    def best_params(self) -> npt.NDArray[np.float64]:
+        """Get a copy of the parameters that achieved the best (lowest) loss.
+
+        Returns:
+            npt.NDArray[np.float64]: Copy of the best parameters. Modifications to this array
+                will not affect the internal state.
+        """
+        if len(self._best_params) == 0 or not self._has_run_optimization():
+            warn(
+                "best_params is not available. Optimization has not been run yet. "
+                "Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return self._best_params.copy()
+
+    @property
+    def best_loss(self) -> float:
+        """Get the best loss achieved so far.
+
+        Returns:
+            float: The best loss achieved so far.
+        """
+        if not self._has_run_optimization():
+            warn(
+                "best_loss has not been computed yet. Optimization has not been run. "
+                "Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        elif self._best_loss == float("inf"):
+            # Defensive check: if optimization ran but best_loss is still inf, something is wrong
+            raise RuntimeError(
+                "best_loss is still infinite after optimization. This indicates a problem "
+                "with the optimization process. The optimization callback may not have executed "
+                "correctly, or all computed losses were infinite."
+            )
+        return self._best_loss
+
+    @property
+    def best_probs(self) -> dict[CircuitTag, dict[str, float]]:
+        """Get normalized probabilities for the best parameters.
+
+        This property provides access to the probability distribution computed
+        by running measurement circuits with the best parameters found during
+        optimization. The distribution maps bitstrings (computational basis states)
+        to their measured probabilities.
+
+        The probabilities are normalized and have deterministic ordering when
+        iterated (dictionary insertion order is preserved in Python 3.7+).
+
+        Returns:
+            dict[CircuitTag, dict[str, float]]: Dictionary mapping CircuitTag keys to
+                bitstring probability dictionaries. Bitstrings are binary strings
+                (e.g., "0101"), values are probabilities in range [0.0, 1.0].
+                Returns an empty dict if final computation has not been performed.
+
+        Raises:
+            RuntimeError: If attempting to access probabilities before running
+                the algorithm with final computation enabled.
+
+        Note:
+            To populate this distribution, you must run the algorithm with
+            `perform_final_computation=True` (the default):
+
+            >>> program.run(perform_final_computation=True)
+            >>> probs = program.best_probs
+
+        Example:
+            >>> program.run()
+            >>> probs = program.best_probs
+            >>> for bitstring, prob in probs.items():
+            ...     print(f"{bitstring}: {prob:.2%}")
+            0101: 42.50%
+            1010: 31.20%
+            ...
+        """
+        if not self._best_probs:
+            warn(
+                "best_probs is empty. Either optimization has not been run yet, "
+                "or final computation was not performed. Call run() to execute the optimization.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return self._best_probs.copy()
+
+    def get_top_solutions(
+        self, n: int = 10, *, min_prob: float = 0.0, include_decoded: bool = False
+    ) -> list[SolutionEntry]:
+        """Get the top-N solutions sorted by probability.
+
+        This method extracts the most probable solutions from the measured
+        probability distribution. Solutions are sorted by probability (descending)
+        with deterministic tie-breaking using lexicographic ordering of bitstrings.
+
+        Args:
+            n (int): Maximum number of solutions to return. Must be non-negative.
+                If n is 0 or negative, returns an empty list. If n exceeds the
+                number of available solutions (after filtering), returns all
+                available solutions. Defaults to 10.
+            min_prob (float): Minimum probability threshold for including solutions.
+                Only solutions with probability >= min_prob will be included.
+                Must be in range [0.0, 1.0]. Defaults to 0.0 (no filtering).
+            include_decoded (bool): Whether to populate the `decoded` field of
+                each SolutionEntry by calling the `decode_solution_fn` provided
+                in the constructor. If False, the decoded field will be None.
+                Defaults to False.
+
+        Returns:
+            list[SolutionEntry]: List of solution entries sorted by probability
+                (descending), then by bitstring (lexicographically ascending)
+                for deterministic tie-breaking. Returns an empty list if no
+                probability distribution is available or n <= 0.
+
+        Raises:
+            RuntimeError: If probability distribution is not available because
+                optimization has not been run or final computation was not performed.
+            ValueError: If min_prob is not in range [0.0, 1.0] or n is negative.
+
+        Note:
+            The probability distribution must be computed by running the algorithm
+            with `perform_final_computation=True` (the default):
+
+            >>> program.run(perform_final_computation=True)
+            >>> top_10 = program.get_top_solutions(n=10)
+
+        Example:
+            >>> # Get top 5 solutions with probability >= 5%
+            >>> program.run()
+            >>> solutions = program.get_top_solutions(n=5, min_prob=0.05)
+            >>> for sol in solutions:
+            ...     print(f"{sol.bitstring}: {sol.prob:.2%}")
+            1010: 42.50%
+            0101: 31.20%
+            1100: 15.30%
+            0011: 8.50%
+            1111: 2.50%
+
+            >>> # Get solutions with decoding
+            >>> solutions = program.get_top_solutions(n=3, include_decoded=True)
+            >>> for sol in solutions:
+            ...     print(f"{sol.bitstring} -> {sol.decoded}")
+            1010 -> [0, 2]
+            0101 -> [1, 3]
+            ...
+        """
+        # Validate inputs
+        if n < 0:
+            raise ValueError(f"n must be non-negative, got {n}")
+        if not (0.0 <= min_prob <= 1.0):
+            raise ValueError(f"min_prob must be in range [0.0, 1.0], got {min_prob}")
+
+        # Handle edge case: n == 0
+        if n == 0:
+            return []
+
+        # Require probability distribution to exist
+        if not self._best_probs:
+            raise RuntimeError(
+                "No probability distribution available. The final computation step "
+                "must be performed to compute the probability distribution. "
+                "Call run(perform_final_computation=True) to execute optimization "
+                "and compute the distribution."
+            )
+        # Extract the probability distribution (nested by parameter set)
+        # _best_probs structure: {tag: {bitstring: prob}}
+        probs_dict = next(iter(self._best_probs.values()))
+
+        # Filter by minimum probability and get top n sorted by probability (descending),
+        # then bitstring (ascending) for deterministic tie-breaking
+        top_items = sorted(
+            filter(
+                lambda bitstring_prob: bitstring_prob[1] >= min_prob, probs_dict.items()
+            ),
+            key=lambda bitstring_prob: (-bitstring_prob[1], bitstring_prob[0]),
+        )[:n]
+
+        # Build result list (decode on demand)
+        return [
+            SolutionEntry(
+                bitstring=bitstring,
+                prob=prob,
+                decoded=(
+                    self._decode_solution_fn(bitstring) if include_decoded else None
+                ),
+            )
+            for bitstring, prob in top_items
+        ]
+
+    @property
+    def curr_params(self) -> npt.NDArray[np.float64]:
+        """Get the current parameters.
+
+        These are the parameters used for optimization. They can be accessed
+        and modified at any time, including during optimization.
+
+        Returns:
+            npt.NDArray[np.float64]: Current parameters. If not yet initialized,
+                they will be generated automatically.
+        """
+        if self._curr_params is None:
+            self._initialize_params()
+        return self._curr_params.copy()
+
+    @curr_params.setter
+    def curr_params(self, value: npt.NDArray[np.float64] | None):
+        """
+        Set the current parameters.
+
+        Args:
+            value (npt.NDArray[np.float64] | None): Parameters with shape
+                (n_param_sets, n_layers * n_params), or None to reset
+                to uninitialized state.
+
+        Raises:
+            ValueError: If parameters have incorrect shape.
+        """
+        if value is not None:
+            self._validate_initial_params(value)
+            self._curr_params = value.copy()
+        else:
+            # Reset to uninitialized state
+            self._curr_params = None
+
+    # --- Serialization Adapters (For Pydantic) ---
+    @property
+    def _serialized_program_type(self) -> str:
+        return type(self).__name__
+
+    @property
+    def _serialized_rng_state(self) -> bytes:
+        return pickle.dumps(self._rng.bit_generator.state)
+
+    @property
+    def _serialized_optimizer_config(self) -> OptimizerConfig:
+        config_dict = self.optimizer.get_config()
+        return OptimizerConfig(type=config_dict.pop("type"), config=config_dict)
+
+    @property
+    def _serialized_subclass_state(self) -> SubclassState:
+        return SubclassState(data=self._save_subclass_state())
+
+    @property
+    def meta_circuits(self) -> dict[str, MetaCircuit]:
+        """Get the meta-circuit templates used by this program.
+
+        Returns:
+            dict[str, MetaCircuit]: Dictionary mapping circuit names to their
+                MetaCircuit templates.
+        """
+        # Lazy initialization: each instance has its own _meta_circuits.
+        # Note: When used with ProgramBatch, meta_circuits is initialized sequentially
+        # in the main thread before parallel execution to avoid thread-safety issues.
+        if self._meta_circuits is None:
+            self._meta_circuits = self._create_meta_circuits_dict()
+        return self._meta_circuits
+
+    @abstractmethod
+    def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
+        pass
+
+    @abstractmethod
+    def _generate_circuits(self, **kwargs) -> list[CircuitBundle]:
+        """Generate quantum circuits for execution.
+
+        This method should generate and return a list of Circuit objects based on
+        the current algorithm state and parameters. The circuits will be executed
+        by the backend.
+
+        Args:
+            **kwargs: Additional keyword arguments for circuit generation.
+
+        Returns:
+            list[CircuitBundle]: List of Circuit objects to be executed.
+        """
+        pass
+
+    @abstractmethod
+    def _save_subclass_state(self) -> dict[str, Any]:
+        """Hook method for subclasses to save additional state.
+
+        Subclasses must override this method to return a dictionary of
+        state variables that should be included in the checkpoint.
+
+        Returns:
+            dict[str, Any]: Dictionary of subclass-specific state.
+        """
+        pass
+
+    @abstractmethod
+    def _load_subclass_state(self, state: dict[str, Any]) -> None:
+        """Hook method for subclasses to load additional state.
+
+        Subclasses must override this method to restore state variables
+        from the checkpoint dictionary. This is called after instance creation.
+
+        Args:
+            state (dict[str, Any]): Dictionary of subclass-specific state.
+        """
+        pass
+
+    def _get_optimizer_config(self) -> OptimizerConfig:
+        """Extract optimizer configuration for checkpoint reconstruction.
+
+        Returns:
+            OptimizerConfig: Configuration object for the current optimizer.
+
+        Raises:
+            NotImplementedError: If the optimizer does not support state saving.
+        """
+        config_dict = self.optimizer.get_config()
+        return OptimizerConfig(
+            type=config_dict.pop("type"),
+            config=config_dict,
+        )
+
+    def save_state(self, checkpoint_config: CheckpointConfig) -> str:
+        """Save the program state to a checkpoint directory."""
+        if self.current_iteration == 0 and len(self._losses_history) == 0:
+            raise RuntimeError("Cannot save checkpoint: optimization has not been run.")
+
+        if checkpoint_config.checkpoint_dir is None:
+            raise ValueError(
+                "checkpoint_config.checkpoint_dir must be a non-None Path."
+            )
+
+        main_dir = _ensure_checkpoint_dir(checkpoint_config.checkpoint_dir)
+        checkpoint_path = _get_checkpoint_subdir_path(main_dir, self.current_iteration)
+        checkpoint_path.mkdir(parents=True, exist_ok=True)
+
+        # 1. Save optimizer
+        self.optimizer.save_state(checkpoint_path)
+
+        # 2. Save Program State (Pydantic pulls data via validation_aliases)
+        state = ProgramState.model_validate(self)
+
+        state_file = checkpoint_path / PROGRAM_STATE_FILE
+        _atomic_write(state_file, state.model_dump_json(indent=2))
+
+        return checkpoint_path
+
+    @classmethod
+    def load_state(
+        cls,
+        checkpoint_dir: Path | str,
+        backend: CircuitRunner,
+        subdirectory: str | None = None,
+        **kwargs,
+    ) -> "VariationalQuantumAlgorithm":
+        """Load program state from a checkpoint directory."""
+        checkpoint_path = resolve_checkpoint_path(checkpoint_dir, subdirectory)
+        state_file = checkpoint_path / PROGRAM_STATE_FILE
+
+        # 1. Load Pydantic Model
+        state = _load_and_validate_pydantic_model(
+            state_file,
+            ProgramState,
+            required_fields=["program_type", "current_iteration"],
+        )
+
+        # 2. Reconstruct Optimizer
+        opt_config = state.optimizer_config
+        if opt_config.type == "MonteCarloOptimizer":
+            optimizer = MonteCarloOptimizer.load_state(checkpoint_path)
+        elif opt_config.type == "PymooOptimizer":
+            optimizer = PymooOptimizer.load_state(checkpoint_path)
+        else:
+            raise ValueError(f"Unsupported optimizer type: {opt_config.type}")
+
+        # 3. Create Instance
+        program = cls(backend=backend, optimizer=optimizer, seed=state.seed, **kwargs)
+
+        # 4. Restore State
+        state.restore(program)
+
+        return program
+
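Together, `save_state` and `load_state` give a resumable-run workflow roughly like the following sketch (illustrative: `MyVQA` stands in for any concrete subclass, and only the `checkpoint_dir` field of `CheckpointConfig` appears in this diff; the full model lives in `divi/qprog/checkpointing.py`):

    from pathlib import Path

    config = CheckpointConfig(checkpoint_dir=Path("./checkpoints"))
    program = MyVQA(backend=backend, optimizer=MonteCarloOptimizer(), seed=42)
    program.run(checkpoint_config=config)  # checkpoints on CheckpointConfig's schedule

    # Later: rebuild the optimizer and program state from the checkpoint directory
    restored = MyVQA.load_state(Path("./checkpoints"), backend=backend)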
+    def get_expected_param_shape(self) -> tuple[int, int]:
+        """
+        Get the expected shape for initial parameters.
+
+        Returns:
+            tuple[int, int]: Shape (n_param_sets, n_layers * n_params) that
+                initial parameters should have for this quantum program.
+        """
+        return (self.optimizer.n_param_sets, self.n_layers * self.n_params)
+
+    def _validate_initial_params(self, params: npt.NDArray[np.float64]):
+        """
+        Validate user-provided initial parameters.
+
+        Args:
+            params (npt.NDArray[np.float64]): Parameters to validate.
+
+        Raises:
+            ValueError: If parameters have incorrect shape.
+        """
+        expected_shape = self.get_expected_param_shape()
+
+        if params.shape != expected_shape:
+            raise ValueError(
+                f"Initial parameters must have shape {expected_shape}, "
+                f"got {params.shape}"
+            )
+
+    def _initialize_params(self):
+        """
+        Initialize the circuit parameters randomly.
+
+        Generates random parameters with values uniformly distributed between
+        0 and 2π. The number of parameter sets depends on the optimizer being used.
+        """
+        total_params = self.n_layers * self.n_params
+        self._curr_params = self._rng.uniform(
+            0, 2 * np.pi, (self.optimizer.n_param_sets, total_params)
+        )
+
+    def _run_optimization_circuits(self, **kwargs) -> dict[int, float]:
+        self._curr_circuits = self._generate_circuits(**kwargs)
+
+        if self.backend.supports_expval:
+            kwargs["ham_ops"] = convert_hamiltonian_to_pauli_string(
+                self.cost_hamiltonian, self.n_qubits
+            )
+
+        losses = self._dispatch_circuits_and_process_results(**kwargs)
+
+        return losses
+
+    @staticmethod
+    def _parse_result_tag(tag: CircuitTag) -> tuple[int, int]:
+        """Extract (param_id, qem_id) from a result tag."""
+        if not isinstance(tag, CircuitTag):
+            raise TypeError("Result tags must be CircuitTag instances.")
+        return tag.param_id, tag.qem_id
+
+    def _group_results(
+        self, results: dict[str, dict[str, int]]
+    ) -> dict[int, dict[int, list[dict[str, int]]]]:
+        """
+        Group results by parameter id and QEM id.
+
+        Returns:
+            dict[int, dict[int, list[dict[str, int]]]]: {param_id: {qem_id: [shots...]}}
+        """
+        grouped: dict[int, dict[int, list[dict[str, int]]]] = {}
+        for tag, shots in results.items():
+            param_id, qem_id = self._parse_result_tag(tag)
+            grouped.setdefault(param_id, {}).setdefault(qem_id, []).append(shots)
+        return grouped
+
+    def _reset_tag_cache(self) -> None:
+        """Reset per-run tag cache for structured result tags."""
+        self._tag_map: dict[str, CircuitTag] = {}
+
+    def _encode_tag(self, tag: CircuitTag | str) -> str:
+        """Convert structured tags to backend-safe strings."""
+        if isinstance(tag, CircuitTag):
+            tag_str = format_circuit_tag(tag)
+            self._tag_map[tag_str] = tag
+            return tag_str
+        return str(tag)
+
+    def _decode_tags(
+        self, results: dict[str, dict[str, int]]
+    ) -> dict[CircuitTag | str, dict[str, int]]:
+        """Restore structured tags from backend result labels."""
+        if not self._tag_map:
+            return results
+        return {self._tag_map.get(tag, tag): shots for tag, shots in results.items()}
+
+    def _apply_qem_protocol(
+        self, exp_matrix: npt.NDArray[np.float64]
+    ) -> list[npt.NDArray[np.float64]]:
+        """Apply the configured QEM protocol to expectation value matrices."""
+        return [
+            self._qem_protocol.postprocess_results(exp_vals) for exp_vals in exp_matrix
+        ]
+
+    def _compute_marginal_results(
+        self,
+        qem_groups: dict[int, list[dict[str, int]]],
+        measurement_groups: list[list[qml.operation.Operator]],
+        ham_ops: str | None,
+    ) -> list[npt.NDArray[np.float64] | list[npt.NDArray[np.float64]]]:
+        """Compute marginal results, handling backend modes and QEM."""
+        if self.backend.supports_expval:
+            if ham_ops is None:
+                raise ValueError(
+                    "Hamiltonian operators (ham_ops) are required when using a backend "
+                    "that supports expectation values, but were not provided."
+                )
+            ham_ops_list = ham_ops.split(";")
+            qem_group_values = [shots for _, shots in sorted(qem_groups.items())]
+            return [
+                self._apply_qem_protocol(
+                    np.array(
+                        [
+                            [shot_dict[op] for op in ham_ops_list]
+                            for shot_dict in shots_dicts
+                        ]
+                    ).T
+                )
+                for shots_dicts in qem_group_values
+            ] or []
+
+        shots_by_qem_idx = zip(*qem_groups.values())
+        marginal_results: list[
+            npt.NDArray[np.float64] | list[npt.NDArray[np.float64]]
+        ] = []
+        wire_order = tuple(reversed(self.cost_hamiltonian.wires))
+        for shots_dicts, curr_measurement_group in zip(
+            shots_by_qem_idx, measurement_groups
+        ):
+            exp_matrix = _batched_expectation(
+                shots_dicts, curr_measurement_group, wire_order
+            )
+            mitigated = self._apply_qem_protocol(exp_matrix)
+            marginal_results.append(mitigated if len(mitigated) > 1 else mitigated[0])
+
+        return marginal_results
+
+    @staticmethod
+    def _merge_param_group_counts(
+        param_group: list[tuple[str, dict[str, int]]],
+    ) -> dict[str, int]:
+        """Merge shot histograms for a single parameter group."""
+        shots_dict: dict[str, int] = {}
+        for _, d in param_group:
+            for s, c in d.items():
+                shots_dict[s] = shots_dict.get(s, 0) + c
+        return shots_dict
+
+    def _post_process_results(
+        self, results: dict[str, dict[str, int]], **kwargs
+    ) -> dict[int, float]:
+        """
+        Post-process the results of the quantum problem.
+
+        Args:
+            results (dict[CircuitTag, dict[str, int]]): The shot histograms of the quantum execution
+                step. Keys are CircuitTag instances containing param, QEM, and measurement ids.
+
+        Returns:
+            dict[int, float]: The energies for each parameter set grouping, where the dict keys
+                correspond to the parameter indices.
+        """
+        if self._is_compute_probabilities:
+            probs = convert_counts_to_probs(results, self.backend.shots)
+            return reverse_dict_endianness(probs)
+
+        if not (self._cancellation_event and self._cancellation_event.is_set()):
+            self.reporter.info(
+                message="Post-processing output", iteration=self.current_iteration
+            )
+
+        losses = {}
+        measurement_groups = self.meta_circuits["cost_circuit"].measurement_groups
+
+        for p, qem_groups in self._group_results(results).items():
+            marginal_results = self._compute_marginal_results(
+                qem_groups=qem_groups,
+                measurement_groups=measurement_groups,
+                ham_ops=kwargs.get("ham_ops"),
+            )
+
+            pl_loss = (
+                self.meta_circuits["cost_circuit"]
+                .postprocessing_fn(marginal_results)
+                .item()
+            )
+
+            losses[p] = pl_loss + self.loss_constant
+
+        return losses
+
+    def _perform_final_computation(self, **kwargs) -> None:
+        """
+        Perform final computations after optimization is complete.
+
+        This is an optional hook method that subclasses can override to perform
+        any post-optimization processing, such as extracting solutions, running
+        final measurements, or computing additional metrics.
+
+        Args:
+            **kwargs: Additional keyword arguments for subclasses.
+
+        Note:
+            The default implementation does nothing. Subclasses should override
+            this method if they need post-optimization processing.
+        """
+        pass
+
+    def run(
+        self,
+        perform_final_computation: bool = True,
+        checkpoint_config: CheckpointConfig | None = None,
+        **kwargs,
+    ) -> tuple[int, float]:
+        """Run the variational quantum algorithm.
+
+        The outputs are stored in the algorithm object.
+
+        Args:
+            perform_final_computation (bool): Whether to perform final computation after optimization completes.
+                Typically, this step involves sampling with the best found parameters to extract
+                solution probability distributions. Set this to False in warm-starting or pre-training
+                routines where the final sampling step is not needed. Defaults to True.
+            checkpoint_config (CheckpointConfig | None): Checkpoint configuration.
+                If None, no checkpointing is performed.
+            **kwargs: Additional keyword arguments for subclasses.
+
+        Returns:
+            tuple[int, float]: A tuple containing (total_circuit_count, total_run_time).
+        """
+        # Initialize checkpointing
+        if checkpoint_config is None:
+            checkpoint_config = CheckpointConfig()
+
+        if checkpoint_config.checkpoint_dir:
+            logger.info(
+                f"Using checkpoint directory: {checkpoint_config.checkpoint_dir}"
+            )
+
+        # Extract max_iterations from kwargs if present (for compatibility with subclasses)
+        max_iterations = kwargs.pop("max_iterations", self.max_iterations)
+        if max_iterations != self.max_iterations:
+            self.max_iterations = max_iterations
+
+        # Warn if max_iterations is less than current_iteration (regardless of how it was set)
+        if self.max_iterations < self.current_iteration:
+            warn(
+                f"max_iterations ({self.max_iterations}) is less than current_iteration "
+                f"({self.current_iteration}). The optimization will not run additional "
+                f"iterations since the maximum has already been reached.",
+                UserWarning,
+            )
+
+        def cost_fn(params):
+            self.reporter.info(
+                message="💸 Computing Cost 💸", iteration=self.current_iteration
+            )
+
+            self._curr_params = np.atleast_2d(params)
+
+            losses = self._run_optimization_circuits(**kwargs)
+
+            losses = np.fromiter(losses.values(), dtype=np.float64)
+
+            if params.ndim > 1:
+                return losses
+            else:
+                return losses.item()
+
+        self._grad_shift_mask = _compute_parameter_shift_mask(
+            self.n_layers * self.n_params
+        )
+
+        def grad_fn(params):
+            self._grad_mode = True
+
+            self.reporter.info(
+                message="📈 Computing Gradients 📈", iteration=self.current_iteration
+            )
+
+            self._curr_params = self._grad_shift_mask + params
+
+            exp_vals = self._run_optimization_circuits(**kwargs)
+            exp_vals_arr = np.fromiter(exp_vals.values(), dtype=np.float64)
+
+            pos_shifts = exp_vals_arr[::2]
+            neg_shifts = exp_vals_arr[1::2]
+            grads = 0.5 * (pos_shifts - neg_shifts)
+
+            self._grad_mode = False
+
+            return grads
+
+        def _iteration_counter(intermediate_result: OptimizeResult):
+
+            self._losses_history.append(
+                dict(
+                    zip(
+                        [str(i) for i in range(len(intermediate_result.x))],
+                        intermediate_result.fun,
+                    )
+                )
+            )
+
+            current_loss = np.min(intermediate_result.fun)
+            if current_loss < self._best_loss:
+                self._best_loss = current_loss
+                best_idx = np.argmin(intermediate_result.fun)
+                self._best_params = intermediate_result.x[best_idx].copy()
+
+            self.current_iteration += 1
+
+            self.reporter.update(iteration=self.current_iteration)
+
+            # Checkpointing
+            if checkpoint_config._should_checkpoint(self.current_iteration):
+                self.save_state(checkpoint_config)
+
+            if self._cancellation_event and self._cancellation_event.is_set():
+                raise _CancelledError("Cancellation requested by batch.")
+
+            # The scipy implementation of COBYLA interprets the `maxiter` option
+            # as the maximum number of function evaluations, not iterations.
+            # To provide a consistent user experience, we disable `scipy`'s
+            # `maxiter` and manually stop the optimization from the callback
+            # when the desired number of iterations is reached.
+            if (
+                isinstance(self.optimizer, ScipyOptimizer)
+                and self.optimizer.method == ScipyMethod.COBYLA
+                and intermediate_result.nit + 1 == self.max_iterations
+            ):
+                raise StopIteration
+
+        self.reporter.info(message="Finished Setup")
+
+        if self._curr_params is None:
+            self._initialize_params()
+        else:
+            self._validate_initial_params(self._curr_params)
+
+        try:
+            self._minimize_res = self.optimizer.optimize(
+                cost_fn=cost_fn,
+                initial_params=self._curr_params,
+                callback_fn=_iteration_counter,
+                jac=grad_fn,
+                max_iterations=self.max_iterations,
+                rng=self._rng,
+            )
+        except _CancelledError:
+            # The optimizer was stopped by our callback. This is not a real
+            # error, just a signal to exit this task cleanly.
+            return self._total_circuit_count, self._total_run_time
+
+        self._final_params = self._minimize_res.x
+
+        # Set _best_params from final result (source of truth)
+        x = np.atleast_2d(self._minimize_res.x)
+        fun = np.atleast_1d(self._minimize_res.fun)
+        best_idx = np.argmin(fun)
+        self._best_params = x[best_idx].copy()
+
+        if perform_final_computation:
+            self._perform_final_computation(**kwargs)
+
+        self.reporter.info(message="Finished successfully!")
+
+        return self.total_circuit_count, self.total_run_time
+
+    def _run_solution_measurement(self) -> None:
+        """Execute measurement circuits to obtain probability distributions for solution extraction."""
+
+        if "meas_circuit" not in self.meta_circuits:
+            raise NotImplementedError(
+                f"{type(self).__name__} does not implement a 'meas_circuit'."
+            )
+
+        self._is_compute_probabilities = True
+
+        # Compute probabilities for best parameters (the ones that achieved best loss)
+        self._curr_params = np.atleast_2d(self._best_params)
+        self._curr_circuits = self._generate_circuits()
+        best_probs = self._dispatch_circuits_and_process_results()
+        self._best_probs.update(best_probs)
+
+        self._is_compute_probabilities = False