qoro-divi 0.2.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- divi/__init__.py +8 -0
- divi/_pbar.py +73 -0
- divi/circuits.py +139 -0
- divi/exp/cirq/__init__.py +7 -0
- divi/exp/cirq/_lexer.py +126 -0
- divi/exp/cirq/_parser.py +889 -0
- divi/exp/cirq/_qasm_export.py +37 -0
- divi/exp/cirq/_qasm_import.py +35 -0
- divi/exp/cirq/exception.py +21 -0
- divi/exp/scipy/_cobyla.py +342 -0
- divi/exp/scipy/pyprima/LICENCE.txt +28 -0
- divi/exp/scipy/pyprima/__init__.py +263 -0
- divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
- divi/exp/scipy/pyprima/cobyla/cobyla.py +599 -0
- divi/exp/scipy/pyprima/cobyla/cobylb.py +849 -0
- divi/exp/scipy/pyprima/cobyla/geometry.py +240 -0
- divi/exp/scipy/pyprima/cobyla/initialize.py +269 -0
- divi/exp/scipy/pyprima/cobyla/trustregion.py +540 -0
- divi/exp/scipy/pyprima/cobyla/update.py +331 -0
- divi/exp/scipy/pyprima/common/__init__.py +0 -0
- divi/exp/scipy/pyprima/common/_bounds.py +41 -0
- divi/exp/scipy/pyprima/common/_linear_constraints.py +46 -0
- divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +64 -0
- divi/exp/scipy/pyprima/common/_project.py +224 -0
- divi/exp/scipy/pyprima/common/checkbreak.py +107 -0
- divi/exp/scipy/pyprima/common/consts.py +48 -0
- divi/exp/scipy/pyprima/common/evaluate.py +101 -0
- divi/exp/scipy/pyprima/common/history.py +39 -0
- divi/exp/scipy/pyprima/common/infos.py +30 -0
- divi/exp/scipy/pyprima/common/linalg.py +452 -0
- divi/exp/scipy/pyprima/common/message.py +336 -0
- divi/exp/scipy/pyprima/common/powalg.py +131 -0
- divi/exp/scipy/pyprima/common/preproc.py +393 -0
- divi/exp/scipy/pyprima/common/present.py +5 -0
- divi/exp/scipy/pyprima/common/ratio.py +56 -0
- divi/exp/scipy/pyprima/common/redrho.py +49 -0
- divi/exp/scipy/pyprima/common/selectx.py +346 -0
- divi/interfaces.py +25 -0
- divi/parallel_simulator.py +258 -0
- divi/qasm.py +220 -0
- divi/qem.py +191 -0
- divi/qlogger.py +119 -0
- divi/qoro_service.py +343 -0
- divi/qprog/__init__.py +13 -0
- divi/qprog/_graph_partitioning.py +619 -0
- divi/qprog/_mlae.py +182 -0
- divi/qprog/_qaoa.py +440 -0
- divi/qprog/_vqe.py +275 -0
- divi/qprog/_vqe_sweep.py +144 -0
- divi/qprog/batch.py +235 -0
- divi/qprog/optimizers.py +75 -0
- divi/qprog/quantum_program.py +493 -0
- divi/utils.py +116 -0
- qoro_divi-0.2.0b1.dist-info/LICENSE +190 -0
- qoro_divi-0.2.0b1.dist-info/LICENSES/Apache-2.0.txt +73 -0
- qoro_divi-0.2.0b1.dist-info/METADATA +57 -0
- qoro_divi-0.2.0b1.dist-info/RECORD +58 -0
- qoro_divi-0.2.0b1.dist-info/WHEEL +4 -0
divi/qprog/optimizers.py
ADDED
@@ -0,0 +1,75 @@
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
#
# SPDX-License-Identifier: Apache-2.0

from enum import Enum

import numpy as np


class Optimizer(Enum):
    NELDER_MEAD = "Nelder-Mead"
    COBYLA = "COBYLA"
    MONTE_CARLO = "Monte Carlo"
    L_BFGS_B = "L-BFGS-B"

    def describe(self):
        return self.name, self.value

    @property
    def n_param_sets(self):
        if self in (Optimizer.NELDER_MEAD, Optimizer.L_BFGS_B, Optimizer.COBYLA):
            return 1
        elif self == Optimizer.MONTE_CARLO:
            return 10

    @property
    def n_samples(self):
        if self == Optimizer.MONTE_CARLO:
            return 10
        return 1

    def compute_new_parameters(self, params, iteration, **kwargs):
        if self != Optimizer.MONTE_CARLO:
            raise NotImplementedError

        rng = kwargs.pop("rng", np.random.default_rng())

        losses = kwargs.pop("losses")
        smallest_energy_keys = sorted(losses, key=lambda k: losses[k])[: self.n_samples]

        new_params = []

        for key in smallest_energy_keys:
            new_param_set = [
                rng.normal(
                    params[int(key)],
                    1 / (2 * iteration),
                    size=params[int(key)].shape,
                )
                for _ in range(self.n_param_sets)
            ]

            for new_param in new_param_set:
                new_param = np.clip(new_param, 0, 2 * np.pi)

            new_params.extend(new_param_set)

        return np.array(new_params)

    def compute_parameter_shift_mask(self, n_params):
        if self != Optimizer.L_BFGS_B:
            raise NotImplementedError

        mask_arr = np.arange(0, 2 * n_params, 2)
        mask_arr[0] = 1

        binary_matrix = (
            (mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0
        ).astype(np.float64)

        binary_matrix = binary_matrix.repeat(2, axis=0)
        binary_matrix[1::2] *= -1
        binary_matrix *= 0.5 * np.pi

        return binary_matrix
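
A minimal usage sketch of the Monte Carlo update above (not part of the package): the parameter array shape and the loss dictionary keyed by parameter-set index mirror how `QuantumProgram` calls `compute_new_parameters` in `quantum_program.py` below; the toy loss function is invented purely for illustration.

```python
import numpy as np

from divi.qprog.optimizers import Optimizer

opt = Optimizer.MONTE_CARLO
rng = np.random.default_rng(0)

# One row per candidate parameter set, as _initialize_params() would produce.
params = rng.uniform(0, 2 * np.pi, size=(opt.n_param_sets, 4))

# Losses keyed by parameter-set index, as _post_process_results() returns them.
losses = {i: float(np.sin(params[i]).sum()) for i in range(opt.n_param_sets)}

# Keeps the n_samples lowest-loss sets and resamples n_param_sets candidates
# around each one, with a spread that shrinks as 1 / (2 * iteration).
new_params = opt.compute_new_parameters(params, iteration=1, losses=losses, rng=rng)

print(new_params.shape)  # (opt.n_samples * opt.n_param_sets, 4) == (100, 4)
```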
divi/qprog/quantum_program.py
ADDED
@@ -0,0 +1,493 @@
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
#
# SPDX-License-Identifier: Apache-2.0

import logging
import pickle
from abc import ABC, abstractmethod
from functools import partial
from queue import Queue
from typing import Optional

import numpy as np
from qiskit.result import marginal_counts, sampled_expectation_value
from scipy.optimize import OptimizeResult, minimize

from divi import QoroService
from divi.circuits import Circuit, MetaCircuit
from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
from divi.interfaces import CircuitRunner
from divi.qem import _NoMitigation
from divi.qoro_service import JobStatus
from divi.qprog.optimizers import Optimizer

logger = logging.getLogger(__name__)


class QuantumProgram(ABC):
    def __init__(
        self,
        backend: CircuitRunner,
        seed: Optional[int] = None,
        progress_queue: Optional[Queue] = None,
        **kwargs,
    ):
        """
        Initializes the QuantumProgram class.

        If a child class represents a hybrid quantum-classical algorithm,
        the instance variables `n_layers` and `n_params` must be set, where:
        - `n_layers` is the number of layers in the quantum circuit.
        - `n_params` is the number of parameters per layer.

        For exotic algorithms where these variables may not be applicable,
        the `_initialize_params` method should be overridden to set the parameters.

        Args:
            backend (CircuitRunner): An instance of a CircuitRunner object, which
                can either be ParallelSimulator or QoroService.
            seed (int): A seed for numpy's random number generator, which will
                be used for the parameter initialization.
                Defaults to None.
            progress_queue (Queue): a queue for progress bar updates.

            **kwargs: Additional keyword arguments that influence behaviour.
                - grouping_strategy (Optional[Any]): A strategy for grouping operations, used in Pennylane's transforms.
                  Defaults to None.
                - qem_protocol (Optional[QEMProtocol]): the quantum error mitigation protocol to apply.
                  Must be of type QEMProtocol. Defaults to None.

                The following key values are reserved for internal use and should not be set by the user:
                - losses (Optional[list]): A list to initialize the `losses` attribute. Defaults to an empty list.
                - final_params (Optional[list]): A list to initialize the `final_params` attribute. Defaults to an empty list.

        """

        # Shared Variables
        self.losses = kwargs.pop("losses", [])
        self.final_params = kwargs.pop("final_params", [])

        self.circuits: list[Circuit] = []

        self._total_circuit_count = 0
        self._total_run_time = 0.0
        self._curr_params = []

        self._seed = seed
        self._rng = np.random.default_rng(self._seed)

        # Lets child classes adapt their optimization
        # step for grad calculation routine
        self._grad_mode = False

        self.backend = backend
        self.job_id = kwargs.get("job_id", None)

        self._progress_queue = progress_queue

        # Needed for Pennylane's transforms
        self._grouping_strategy = kwargs.pop("grouping_strategy", None)

        self._qem_protocol = kwargs.pop("qem_protocol", None) or _NoMitigation()

        self._meta_circuit_factory = partial(
            MetaCircuit,
            grouping_strategy=self._grouping_strategy,
            qem_protocol=self._qem_protocol,
        )

    @property
    def total_circuit_count(self):
        return self._total_circuit_count

    @property
    def total_run_time(self):
        return self._total_run_time

    @property
    def meta_circuits(self):
        return self._meta_circuits

    @abstractmethod
    def _create_meta_circuits_dict(self) -> dict[str, MetaCircuit]:
        pass

    @abstractmethod
    def _generate_circuits(self, **kwargs):
        pass

    def _initialize_params(self):
        self._curr_params = np.array(
            [
                self._rng.uniform(0, 2 * np.pi, self.n_layers * self.n_params)
                for _ in range(self.optimizer.n_param_sets)
            ]
        )

    def _run_optimization_circuits(self, store_data, data_file):
        self.circuits[:] = []

        self._generate_circuits()

        losses = self._dispatch_circuits_and_process_results(
            store_data=store_data, data_file=data_file
        )

        return losses

    def _update_mc_params(self):
        """
        Updates the parameters based on previous MC iteration.
        """

        if self.current_iteration == 0:
            self._initialize_params()

            self.current_iteration += 1

            return

        self._curr_params = self.optimizer.compute_new_parameters(
            self._curr_params,
            self.current_iteration,
            losses=self.losses[-1],
            rng=self._rng,
        )

        self.current_iteration += 1

    def _prepare_and_send_circuits(self):
        job_circuits = {}

        for circuit in self.circuits:
            for tag, qasm_circuit in zip(circuit.tags, circuit.qasm_circuits):
                job_circuits[tag] = qasm_circuit

        self._total_circuit_count += len(job_circuits)

        backend_output = self.backend.submit_circuits(job_circuits)

        if isinstance(self.backend, QoroService):
            self._curr_service_job_id = backend_output

        return backend_output

    def _dispatch_circuits_and_process_results(self, store_data=False, data_file=None):
        """
        Run an iteration of the program. The outputs are stored in the Program object.
        Optionally, the data can be stored in a file.

        Args:
            store_data (bool): Whether to store the data for the iteration
            data_file (str): The file to store the data in
        """

        results = self._prepare_and_send_circuits()

        def add_run_time(response):
            if isinstance(response, dict):
                self._total_run_time += float(response["run_time"])
            elif isinstance(response, list):
                self._total_run_time += sum(float(r["run_time"]) for r in response)

        if isinstance(self.backend, QoroService):
            status = self.backend.poll_job_status(
                self._curr_service_job_id,
                loop_until_complete=True,
                on_complete=add_run_time,
                **(
                    {
                        "pbar_update_fn": lambda n_polls: self._progress_queue.put(
                            {
                                "job_id": self.job_id,
                                "progress": 0,
                                "poll_attempt": n_polls,
                            }
                        )
                    }
                    if self._progress_queue is not None
                    else {}
                ),
            )

            if status != JobStatus.COMPLETED:
                raise Exception(
                    "Job has not completed yet, cannot post-process results"
                )

            results = self.backend.get_job_results(self._curr_service_job_id)

            results = {r["label"]: r["results"] for r in results}

        result = self._post_process_results(results)

        if store_data:
            self.save_iteration(data_file)

        return result

    def _post_process_results(
        self, results: dict[str, dict[str, int]]
    ) -> dict[int, float]:
        """
        Post-process the results of the quantum problem.

        Args:
            results (dict): The shot histograms of the quantum execution step.
                The keys should be strings of format {param_id}_*_{measurement_group_id}.
                i.e. An underscore-separated bunch of metadata, starting always with
                the index of some parameter and ending with the index of some measurement group.
                Any extra piece of metadata that might be relevant to the specific application can
                be kept in the middle.

        Returns:
            (dict) The energies for each parameter set grouping, where the dict keys
            correspond to the parameter indices.
        """

        losses = {}
        measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups

        for p in range(self._curr_params.shape[0]):
            # Extract relevant entries from the execution results dict
            param_results = {k: v for k, v in results.items() if k.startswith(f"{p}_")}

            # Compute the marginal results for each observable
            marginal_results = []
            for group_idx, curr_measurement_group in enumerate(measurement_groups):
                group_results = {
                    k: v
                    for k, v in param_results.items()
                    if k.endswith(f"_{group_idx}")
                }

                curr_marginal_results = []
                for observable in curr_measurement_group:
                    intermediate_exp_values = [
                        sampled_expectation_value(
                            marginal_counts(shots_dict, observable.wires.tolist()),
                            "Z" * len(observable.wires),
                        )
                        for shots_dict in group_results.values()
                    ]

                    mitigated_exp_value = self._qem_protocol.postprocess_results(
                        intermediate_exp_values
                    )

                    curr_marginal_results.append(mitigated_exp_value)

                marginal_results.append(
                    curr_marginal_results
                    if len(curr_marginal_results) > 1
                    else curr_marginal_results[0]
                )

            pl_loss = (
                self._meta_circuits["cost_circuit"]
                .postprocessing_fn(marginal_results)[0]
                .item()
            )

            losses[p] = pl_loss + self.loss_constant

        return losses

    def run(self, store_data=False, data_file=None):
        """
        Run the QAOA problem. The outputs are stored in the QAOA object. Optionally, the data can be stored in a file.

        Args:
            store_data (bool): Whether to store the data for the iteration
            data_file (str): The file to store the data in
        """

        if self._progress_queue is not None:
            self._progress_queue.put(
                {
                    "job_id": self.job_id,
                    "message": "Finished Setup",
                    "progress": 0,
                }
            )
        else:
            logger.info("Finished Setup")

        if self.optimizer == Optimizer.MONTE_CARLO:
            while self.current_iteration < self.max_iterations:

                self._update_mc_params()

                if self._progress_queue is not None:
                    self._progress_queue.put(
                        {
                            "job_id": self.job_id,
                            "message": f"⛰️ Sampling from Loss Lansdscape ⛰️",
                            "progress": 0,
                        }
                    )
                else:
                    logger.info(
                        f"Running Iteration #{self.current_iteration} circuits\r"
                    )

                curr_losses = self._run_optimization_circuits(store_data, data_file)

                if self._progress_queue is not None:
                    self._progress_queue.put(
                        {
                            "job_id": self.job_id,
                            "progress": 1,
                        }
                    )
                else:
                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")

                self.losses.append(curr_losses)

            self.final_params[:] = np.atleast_2d(self._curr_params)

        elif self.optimizer in (
            Optimizer.NELDER_MEAD,
            Optimizer.L_BFGS_B,
            Optimizer.COBYLA,
        ):

            def cost_fn(params):
                task_name = "💸 Computing Cost 💸"

                if self._progress_queue is not None:
                    self._progress_queue.put(
                        {
                            "job_id": self.job_id,
                            "message": task_name,
                            "progress": 0,
                        }
                    )
                else:
                    logger.info(
                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
                    )

                self._curr_params = np.atleast_2d(params)

                losses = self._run_optimization_circuits(store_data, data_file)

                return losses[0]

            def grad_fn(params):
                self._grad_mode = True

                task_name = "📈 Computing Gradients 📈"

                if self._progress_queue is not None:
                    self._progress_queue.put(
                        {
                            "job_id": self.job_id,
                            "message": task_name,
                            "progress": 0,
                        }
                    )
                else:
                    logger.info(
                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
                    )

                shift_mask = self.optimizer.compute_parameter_shift_mask(len(params))

                self._curr_params = shift_mask + params

                exp_vals = self._run_optimization_circuits(store_data, data_file)

                grads = np.zeros_like(params)
                for i in range(len(params)):
                    grads[i] = 0.5 * (exp_vals[2 * i] - exp_vals[2 * i + 1])

                self._grad_mode = False

                return grads

            def _iteration_counter(intermediate_result: OptimizeResult):
                self.losses.append({0: intermediate_result.fun})

                self.final_params[:] = np.atleast_2d(intermediate_result.x)

                self.current_iteration += 1

                if self._progress_queue is not None:
                    self._progress_queue.put(
                        {
                            "job_id": self.job_id,
                            "progress": 1,
                        }
                    )
                else:
                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")

                if (
                    self.optimizer == Optimizer.COBYLA
                    and intermediate_result.nit + 1 == self.max_iterations
                ):
                    raise StopIteration

            if self.max_iterations is None or self.optimizer == Optimizer.COBYLA:
                # COBYLA perceive maxiter as maxfev so we need
                # to use the callback fn for counting instead.
                maxiter = None
            else:
                # Need to add one more iteration for Nelder-Mead's simplex initialization step
                maxiter = (
                    self.max_iterations + 1
                    if self.optimizer == Optimizer.NELDER_MEAD
                    else self.max_iterations
                )

            self._initialize_params()
            self._minimize_res = minimize(
                fun=cost_fn,
                x0=self._curr_params[0],
                method=(
                    cobyla_fn
                    if self.optimizer == Optimizer.COBYLA
                    else self.optimizer.value
                ),
                jac=grad_fn if self.optimizer == Optimizer.L_BFGS_B else None,
                callback=_iteration_counter,
                options={"maxiter": maxiter},
            )

        if self._progress_queue:
            self._progress_queue.put(
                {
                    "job_id": self.job_id,
                    "progress": 0,
                    "final_status": "Success",
                }
            )
        else:
            logger.info(f"Finished Optimization!")

        return self._total_circuit_count, self._total_run_time

    def save_iteration(self, data_file):
        """
        Save the current iteration of the program to a file.

        Args:
            data_file (str): The file to save the iteration to.
        """

        with open(data_file, "wb") as f:
            pickle.dump(self, f)

    @staticmethod
    def import_iteration(data_file):
        """
        Import an iteration of the program from a file.

        Args:
            data_file (str): The file to import the iteration from.
        """

        with open(data_file, "rb") as f:
            return pickle.load(f)
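
`grad_fn` above applies the two-term parameter-shift rule: each parameter is evaluated at a +π/2 and a −π/2 shift, and the gradient component is half the difference of the two cost values. A standalone NumPy sketch of that rule on a toy cost follows; `toy_cost` and `parameter_shift_grad` are illustrative names, and no divi objects are involved.

```python
import numpy as np


def toy_cost(theta):
    # Stand-in for the circuit cost; a sum of sines obeys the same exact
    # ±pi/2 shift rule as expectation values of Pauli-generated gates.
    return np.sum(np.sin(theta))


def parameter_shift_grad(theta):
    # Build ±pi/2 one-hot shifts, paired as [+shift_i, -shift_i] rows,
    # mirroring how grad_fn consumes consecutive rows of its shift mask.
    n = len(theta)
    shifts = np.repeat(0.5 * np.pi * np.eye(n), 2, axis=0)
    shifts[1::2] *= -1

    exp_vals = [toy_cost(theta + s) for s in shifts]

    grads = np.zeros_like(theta)
    for i in range(n):
        grads[i] = 0.5 * (exp_vals[2 * i] - exp_vals[2 * i + 1])
    return grads


theta = np.array([0.3, 1.2, 2.0])
print(parameter_shift_grad(theta))  # equals cos(theta) exactly for this toy cost
```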
divi/utils.py
ADDED
@@ -0,0 +1,116 @@
# SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
#
# SPDX-License-Identifier: Apache-2.0

from functools import reduce
from warnings import warn

import numpy as np
import pennylane as qml
import scipy.sparse as sps


def _is_sanitized(
    qubo_matrix: np.ndarray | sps.spmatrix,
) -> np.ndarray | sps.spmatrix:
    # Sanitize the QUBO matrix to ensure it is either symmetric or upper triangular.

    is_sparse = sps.issparse(qubo_matrix)

    return (
        (
            ((qubo_matrix != qubo_matrix.T).nnz == 0)
            or ((qubo_matrix != sps.triu(qubo_matrix)).nnz == 0)
        )
        if is_sparse
        else (
            np.allclose(qubo_matrix, qubo_matrix.T)
            or np.allclose(qubo_matrix, np.triu(qubo_matrix))
        )
    )


def convert_qubo_matrix_to_pennylane_ising(
    qubo_matrix: np.ndarray | sps.spmatrix,
) -> tuple[qml.operation.Operator, float]:
    """Convert QUBO matrix to Ising Hamiltonian in Pennylane.

    The conversion follows the mapping:
    - QUBO variables x_i ∈ {0,1} map to Ising variables s_i ∈ {-1,1} via s_i = 2x_i - 1
    - This transforms a QUBO problem into an equivalent Ising problem

    Args:
        qubo_matrix: The QUBO matrix Q where the objective is to minimize x^T Q x

    Returns:
        A tuple of (Ising Hamiltonian as a PennyLane operator, constant term)
    """

    if not _is_sanitized(qubo_matrix):
        warn(
            "The QUBO matrix is neither symmetric nor upper triangular."
            " Symmetrizing it for the Ising Hamiltonian creation."
        )
        qubo_matrix = (qubo_matrix + qubo_matrix.T) / 2

    is_sparse = sps.issparse(qubo_matrix)
    backend = sps if is_sparse else np

    # Gather non-zero indices in the upper triangle of the matrix
    triu_matrix = backend.triu(
        qubo_matrix,
        **(
            {"format": qubo_matrix.format if qubo_matrix.format != "coo" else "csc"}
            if is_sparse
            else {}
        ),
    )
    rows, cols = triu_matrix.nonzero()
    values = triu_matrix[rows, cols].A1 if is_sparse else triu_matrix[rows, cols]

    n = qubo_matrix.shape[0]
    linear_terms = np.zeros(n)
    constant_term = 0.0
    ising_terms = []
    ising_weights = []

    for i, j, weight in zip(rows, cols, values):
        weight = float(weight)
        i, j = int(i), int(j)

        if i == j:
            # Diagonal elements
            linear_terms[i] -= weight / 2
            constant_term += weight / 2
        else:
            # Off-diagonal elements (i < j since we're using triu)
            ising_terms.append([i, j])
            ising_weights.append(weight / 4)

            # Update linear terms
            linear_terms[i] -= weight / 4
            linear_terms[j] -= weight / 4

            # Update constant term
            constant_term += weight / 4

    # Add the linear terms (Z operators)
    for i, curr_lin_term in filter(lambda x: x[1] != 0, enumerate(linear_terms)):
        ising_terms.append([i])
        ising_weights.append(float(curr_lin_term))

    # Construct the Ising Hamiltonian as a PennyLane operator
    pauli_string = qml.Identity(0) * 0
    for term, weight in zip(ising_terms, ising_weights):
        if len(term) == 1:
            # Single-qubit term (Z operator)
            curr_term = qml.Z(term[0]) * weight
        else:
            # Two-qubit term (ZZ interaction)
            curr_term = (
                reduce(lambda x, y: x @ y, map(lambda x: qml.Z(x), term)) * weight
            )

        pauli_string += curr_term

    return pauli_string.simplify(), constant_term
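
A quick sanity check of `convert_qubo_matrix_to_pennylane_ising` (not from the package; the small QUBO here is arbitrary): the diagonal of the resulting Hamiltonian matrix, plus the returned constant, should reproduce x^T Q x for every bitstring.

```python
import numpy as np
import pennylane as qml

from divi.utils import convert_qubo_matrix_to_pennylane_ising

# Upper-triangular 2x2 QUBO: f(x) = x0 + 3*x1 + 2*x0*x1
Q = np.array([[1.0, 2.0],
              [0.0, 3.0]])

hamiltonian, constant = convert_qubo_matrix_to_pennylane_ising(Q)

# Ising energies of the computational basis states |x0 x1> in the order
# (0,0), (0,1), (1,0), (1,1) for wire_order=[0, 1].
energies = np.real(np.diag(qml.matrix(hamiltonian, wire_order=[0, 1]))) + constant

# Brute-force QUBO values for the same bitstring ordering.
expected = [np.array(x) @ Q @ np.array(x) for x in ((0, 0), (0, 1), (1, 0), (1, 1))]

assert np.allclose(energies, expected)  # [0., 3., 1., 6.]
```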