qoro-divi 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of qoro-divi might be problematic.
- divi/__init__.py +1 -2
- divi/backends/__init__.py +7 -0
- divi/backends/_circuit_runner.py +46 -0
- divi/{parallel_simulator.py → backends/_parallel_simulator.py} +136 -53
- divi/backends/_qoro_service.py +531 -0
- divi/circuits/__init__.py +5 -0
- divi/circuits/_core.py +226 -0
- divi/{qasm.py → circuits/qasm.py} +21 -2
- divi/{exp → extern}/cirq/_validator.py +9 -7
- divi/qprog/__init__.py +18 -5
- divi/qprog/algorithms/__init__.py +14 -0
- divi/qprog/algorithms/_ansatze.py +311 -0
- divi/qprog/{_qaoa.py → algorithms/_qaoa.py} +69 -41
- divi/qprog/{_vqe.py → algorithms/_vqe.py} +79 -135
- divi/qprog/batch.py +239 -55
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +219 -18
- divi/qprog/quantum_program.py +389 -57
- divi/qprog/workflows/__init__.py +10 -0
- divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +3 -34
- divi/qprog/{_qubo_partitioning.py → workflows/_qubo_partitioning.py} +42 -25
- divi/qprog/{_vqe_sweep.py → workflows/_vqe_sweep.py} +59 -26
- divi/reporting/__init__.py +7 -0
- divi/reporting/_pbar.py +112 -0
- divi/{qlogger.py → reporting/_qlogger.py} +37 -2
- divi/{reporter.py → reporting/_reporter.py} +8 -14
- divi/utils.py +49 -10
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/METADATA +2 -1
- qoro_divi-0.3.5.dist-info/RECORD +69 -0
- divi/_pbar.py +0 -70
- divi/circuits.py +0 -139
- divi/interfaces.py +0 -25
- divi/qoro_service.py +0 -425
- qoro_divi-0.3.3.dist-info/RECORD +0 -62
- /divi/{qpu_system.py → backends/_qpu_system.py} +0 -0
- /divi/{qem.py → circuits/qem.py} +0 -0
- /divi/{exp → extern}/cirq/__init__.py +0 -0
- /divi/{exp → extern}/cirq/_lexer.py +0 -0
- /divi/{exp → extern}/cirq/_parser.py +0 -0
- /divi/{exp → extern}/cirq/_qasm_export.py +0 -0
- /divi/{exp → extern}/cirq/_qasm_import.py +0 -0
- /divi/{exp → extern}/cirq/exception.py +0 -0
- /divi/{exp → extern}/scipy/_cobyla.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/LICENCE.txt +0 -0
- /divi/{exp → extern}/scipy/pyprima/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/cobyla.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/cobylb.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/geometry.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/initialize.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/trustregion.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/cobyla/update.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/__init__.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_bounds.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_linear_constraints.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_nonlinear_constraints.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/_project.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/checkbreak.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/consts.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/evaluate.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/history.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/infos.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/linalg.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/message.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/powalg.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/preproc.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/present.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/ratio.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/redrho.py +0 -0
- /divi/{exp → extern}/scipy/pyprima/common/selectx.py +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSE +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSES/.license-header +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {qoro_divi-0.3.3.dist-info → qoro_divi-0.3.5.dist-info}/WHEEL +0 -0
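The bulk of this release is a package reorganisation: divi/exp becomes divi/extern, and new backends, circuits, reporting, qprog/algorithms, and qprog/workflows subpackages are introduced. Callers that imported the moved modules directly need the new paths. For example, mirroring the import change visible in the optimizers.py diff below:

    # 0.3.3 path (removed in this release):
    #   from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
    # 0.3.5 path:
    from divi.extern.scipy._cobyla import _minimize_cobyla as cobyla_fn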
divi/qprog/optimizers.py
CHANGED
@@ -2,21 +2,23 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import time
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from enum import Enum
-from typing import Any
 
 import numpy as np
+from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
+from pymoo.algorithms.soo.nonconvex.de import DE
+from pymoo.core.evaluator import Evaluator
+from pymoo.core.individual import Individual
+from pymoo.core.population import Population
+from pymoo.core.problem import Problem
+from pymoo.problems.static import StaticProblem
+from pymoo.termination import get_termination
 from scipy.optimize import OptimizeResult, minimize
 
-from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
-
-
-class ScipyMethod(Enum):
-    NELDER_MEAD = "Nelder-Mead"
-    COBYLA = "COBYLA"
-    L_BFGS_B = "L-BFGS-B"
+from divi.extern.scipy._cobyla import _minimize_cobyla as cobyla_fn
 
 
 class Optimizer(ABC):
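Note on the new imports: the pymoo algorithm classes (CMAES, DE) are never referenced by name in the code above; the PymooOptimizer added in the next hunk resolves the class from the enum value through the module's global namespace. A minimal sketch of that dispatch pattern, using only names that appear in this diff:

    from enum import Enum

    from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
    from pymoo.algorithms.soo.nonconvex.de import DE


    class PymooMethod(Enum):
        CMAES = "CMAES"
        DE = "DE"


    # The enum value doubles as the imported class name, so the algorithm
    # class can be looked up in the module's globals at call time:
    algorithm_cls = globals()[PymooMethod.DE.value]
    assert algorithm_cls is DE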
@@ -52,12 +54,166 @@ class Optimizer(ABC):
         raise NotImplementedError("This method should be implemented by subclasses.")
 
 
+class PymooMethod(Enum):
+    """Supported optimization methods from the pymoo library."""
+
+    CMAES = "CMAES"
+    DE = "DE"
+
+
+class PymooOptimizer(Optimizer):
+    """
+    Optimizer wrapper for pymoo optimization algorithms.
+
+    Supports population-based optimization methods from the pymoo library,
+    including CMAES (Covariance Matrix Adaptation Evolution Strategy) and
+    DE (Differential Evolution).
+    """
+
+    def __init__(self, method: PymooMethod, population_size: int = 50, **kwargs):
+        """
+        Initialize a pymoo-based optimizer.
+
+        Args:
+            method (PymooMethod): The optimization algorithm to use (CMAES or DE).
+            population_size (int, optional): Size of the population for the algorithm.
+                Defaults to 50.
+            **kwargs: Additional algorithm-specific parameters passed to pymoo.
+        """
+        super().__init__()
+
+        self.method = method
+        self.population_size = population_size
+        self.algorithm_kwargs = kwargs
+
+    @property
+    def n_param_sets(self):
+        """
+        Get the number of parameter sets (population size) used by this optimizer.
+
+        Returns:
+            int: Population size for the optimization algorithm.
+        """
+        # Determine population size from stored parameters
+        if self.method.value == "DE":
+            return self.population_size
+        elif self.method.value == "CMAES":
+            # CMAES uses 'popsize' in options dict
+            return self.algorithm_kwargs.get("popsize", self.population_size)
+        return self.population_size
+
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable | None = None,
+        **kwargs,
+    ):
+        """
+        Run the pymoo optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 2D array of
+                parameter sets and return an array of cost values.
+            initial_params (np.ndarray): Initial parameter values as a 2D array
+                of shape (n_param_sets, n_params).
+            callback_fn (Callable, optional): Function called after each iteration
+                with an OptimizeResult object. Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - rng (np.random.Generator): Random number generator
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
+
+        # Create fresh algorithm instance for this optimization run
+        # since pymoo has no reset()-like functionality
+        optimizer_obj = globals()[self.method.value](
+            pop_size=self.population_size, parallelize=False, **self.algorithm_kwargs
+        )
+
+        max_iterations = kwargs.pop("maxiter", 5)
+        rng = kwargs.pop("rng", np.random.default_rng())
+        seed = rng.bit_generator.seed_seq.spawn(1)[0].generate_state(1)[0]
+
+        n_var = initial_params.shape[-1]
+
+        xl = np.zeros(n_var)
+        xu = np.ones(n_var) * 2 * np.pi
+
+        problem = Problem(n_var=n_var, n_obj=1, xl=xl, xu=xu)
+
+        optimizer_obj.setup(
+            problem,
+            termination=get_termination("n_gen", max_iterations),
+            seed=int(seed),
+            verbose=False,
+        )
+        optimizer_obj.start_time = time.time()
+
+        pop = Population.create(
+            *[Individual(X=initial_params[i]) for i in range(self.n_param_sets)]
+        )
+
+        while optimizer_obj.has_next():
+            X = pop.get("X")
+
+            curr_losses = cost_fn(X)
+            static = StaticProblem(problem, F=curr_losses)
+            Evaluator().eval(static, pop)
+
+            optimizer_obj.tell(infills=pop)
+
+            pop = optimizer_obj.ask()
+
+            if callback_fn:
+                callback_fn(OptimizeResult(x=pop.get("X"), fun=curr_losses))
+
+        result = optimizer_obj.result()
+
+        return OptimizeResult(
+            x=result.X,
+            fun=result.F,
+            nit=optimizer_obj.n_gen - 1,
+        )
+
+
+class ScipyMethod(Enum):
+    """Supported optimization methods from scipy.optimize."""
+
+    NELDER_MEAD = "Nelder-Mead"
+    COBYLA = "COBYLA"
+    L_BFGS_B = "L-BFGS-B"
+
+
 class ScipyOptimizer(Optimizer):
+    """
+    Optimizer wrapper for scipy.optimize methods.
+
+    Supports gradient-free and gradient-based optimization algorithms from scipy,
+    including Nelder-Mead simplex, COBYLA, and L-BFGS-B.
+    """
+
     def __init__(self, method: ScipyMethod):
+        """
+        Initialize a scipy-based optimizer.
+
+        Args:
+            method (ScipyMethod): The optimization algorithm to use.
+        """
+        super().__init__()
+
         self.method = method
 
     @property
     def n_param_sets(self):
+        """
+        Get the number of parameter sets used by this optimizer.
+
+        Returns:
+            int: Always returns 1, as scipy optimizers use single-point optimization.
+        """
         return 1
 
     def optimize(
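PymooOptimizer drives pymoo through its ask/tell interface so the whole population can be scored in one batched cost_fn call. A hypothetical usage sketch; the quadratic cost function and the shapes are illustrative, not part of the library:

    import numpy as np

    from divi.qprog.optimizers import PymooMethod, PymooOptimizer


    def batched_cost(params: np.ndarray) -> np.ndarray:
        # Receives the whole population, shape (n_param_sets, n_params),
        # and returns one loss per row, as optimize() above requires.
        return np.sum((params - np.pi) ** 2, axis=1)


    opt = PymooOptimizer(PymooMethod.DE, population_size=20)
    rng = np.random.default_rng(7)
    initial = rng.uniform(0.0, 2.0 * np.pi, size=(opt.n_param_sets, 4))

    result = opt.optimize(batched_cost, initial, maxiter=10, rng=rng)
    print(result.x, result.fun, result.nit)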
@@ -67,6 +223,23 @@ class ScipyOptimizer(Optimizer):
         callback_fn: Callable | None = None,
         **kwargs,
     ):
+        """
+        Run the scipy optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 1D array of
+                parameters and return a scalar cost value.
+            initial_params (np.ndarray): Initial parameter values as a 1D or 2D array.
+                If 2D with shape (1, n_params), it will be squeezed to 1D.
+            callback_fn (Callable, optional): Function called after each iteration.
+                Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - jac (Callable): Gradient function (only used for L-BFGS-B)
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
         max_iterations = kwargs.pop("maxiter", None)
 
         if max_iterations is None or self.method == ScipyMethod.COBYLA:
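For contrast, ScipyOptimizer keeps scipy's single-point contract: one 1D parameter vector in, one scalar out. A hypothetical usage sketch, with an illustrative cost function:

    import numpy as np

    from divi.qprog.optimizers import ScipyMethod, ScipyOptimizer


    def cost(params: np.ndarray) -> float:
        # Single parameter vector in, scalar loss out.
        return float(np.sum((params - 1.0) ** 2))


    opt = ScipyOptimizer(ScipyMethod.NELDER_MEAD)
    initial = np.zeros((1, 4))  # (1, n_params); squeezed to 1D internally
    result = opt.optimize(cost, initial, maxiter=200)
    print(result.x, result.fun)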
@@ -96,7 +269,27 @@ class ScipyOptimizer(Optimizer):
 
 
 class MonteCarloOptimizer(Optimizer):
+    """
+    Monte Carlo-based parameter search optimizer.
+
+    This optimizer samples parameter space randomly, selects the best-performing
+    samples, and uses them as centers for the next generation of samples with
+    decreasing variance. This implements a simple but effective evolutionary strategy.
+    """
+
     def __init__(self, n_param_sets: int = 10, n_best_sets: int = 3):
+        """
+        Initialize a Monte Carlo optimizer.
+
+        Args:
+            n_param_sets (int, optional): Total number of parameter sets to evaluate
+                per iteration. Defaults to 10.
+            n_best_sets (int, optional): Number of top-performing parameter sets to
+                use as seeds for the next generation. Defaults to 3.
+
+        Raises:
+            ValueError: If n_best_sets is greater than n_param_sets.
+        """
         super().__init__()
 
         if n_best_sets > n_param_sets:
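The new docstring describes the resampling strategy, but _compute_new_parameters itself is not shown in this diff. A rough standalone sketch of the described idea (my own simplification, not the library's implementation): draw each new sample around one of the best points, shrinking the spread as iterations progress.

    import numpy as np


    def next_generation(population, best_indices, curr_iter, rng, scale=0.5):
        # Centers are the best-performing points of the current generation.
        centers = population[best_indices]
        # The spread decays with the iteration count, narrowing the search.
        sigma = scale / (curr_iter + 1)
        picks = rng.integers(0, len(centers), size=len(population))
        return centers[picks] + rng.normal(0.0, sigma, size=population.shape)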
@@ -113,10 +306,22 @@ class MonteCarloOptimizer(Optimizer):
 
     @property
     def n_param_sets(self):
+        """
+        Get the number of parameter sets evaluated per iteration.
+
+        Returns:
+            int: Total number of parameter sets.
+        """
         return self._n_param_sets
 
     @property
     def n_best_sets(self):
+        """
+        Get the number of best parameter sets used for seeding the next generation.
+
+        Returns:
+            int: Number of best-performing sets kept.
+        """
         return self._n_best_sets
 
     def _compute_new_parameters(
@@ -169,9 +374,6 @@ class MonteCarloOptimizer(Optimizer):
 
         population = np.copy(initial_params)
 
-        final_params = None
-        final_losses = None
-
         for curr_iter in range(max_iterations):
             # Evaluate the entire population once
             losses = cost_fn(population)
@@ -181,21 +383,20 @@ class MonteCarloOptimizer(Optimizer):
                 : self.n_best_sets
             ]
 
-            # Store the current best results
-            final_params = population[best_indices]
-            final_losses = losses[best_indices]
-
             if callback_fn:
-                callback_fn(OptimizeResult(x=final_params, fun=final_losses))
+                callback_fn(
+                    OptimizeResult(x=population[best_indices], fun=losses[best_indices])
+                )
 
             # Generate the next generation of parameters
             population = self._compute_new_parameters(
                 population, curr_iter, best_indices, rng
             )
 
+        best_idx = np.argmin(losses)
         # Return the best results from the LAST EVALUATED population
         return OptimizeResult(
-            x=final_params,
-            fun=final_losses,
+            x=population[best_idx],
+            fun=losses[best_idx],
             nit=max_iterations,
         )