qoro-divi 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff reflects the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Potentially problematic release.
This version of qoro-divi might be problematic.
- divi/backends/__init__.py +1 -1
- divi/backends/_circuit_runner.py +42 -0
- divi/backends/_parallel_simulator.py +145 -49
- divi/backends/_qoro_service.py +451 -182
- divi/backends/_qpu_system.py +77 -3
- divi/circuits/_core.py +124 -4
- divi/circuits/qasm.py +20 -3
- divi/extern/cirq/_validator.py +12 -3
- divi/qprog/__init__.py +1 -0
- divi/qprog/algorithms/_ansatze.py +112 -12
- divi/qprog/algorithms/_qaoa.py +179 -110
- divi/qprog/algorithms/_vqe.py +192 -58
- divi/qprog/batch.py +270 -51
- divi/qprog/exceptions.py +9 -0
- divi/qprog/optimizers.py +336 -51
- divi/qprog/quantum_program.py +162 -339
- divi/qprog/variational_quantum_algorithm.py +786 -0
- divi/qprog/workflows/_graph_partitioning.py +43 -38
- divi/qprog/workflows/_qubo_partitioning.py +41 -24
- divi/qprog/workflows/_vqe_sweep.py +67 -39
- divi/reporting/_pbar.py +51 -9
- divi/reporting/_qlogger.py +35 -1
- divi/reporting/_reporter.py +11 -20
- divi/utils.py +100 -4
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/METADATA +16 -1
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/RECORD +30 -28
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSE +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSES/.license-header +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {qoro_divi-0.3.4.dist-info → qoro_divi-0.4.0.dist-info}/WHEEL +0 -0
divi/qprog/optimizers.py
CHANGED
@@ -2,22 +2,26 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import time
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from enum import Enum
+from typing import Any
 
 import numpy as np
+from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
+from pymoo.algorithms.soo.nonconvex.de import DE
+from pymoo.core.evaluator import Evaluator
+from pymoo.core.individual import Individual
+from pymoo.core.population import Population
+from pymoo.core.problem import Problem
+from pymoo.problems.static import StaticProblem
+from pymoo.termination import get_termination
 from scipy.optimize import OptimizeResult, minimize
 
 from divi.extern.scipy._cobyla import _minimize_cobyla as cobyla_fn
 
 
-class ScipyMethod(Enum):
-    NELDER_MEAD = "Nelder-Mead"
-    COBYLA = "COBYLA"
-    L_BFGS_B = "L-BFGS-B"
-
-
 class Optimizer(ABC):
     @property
     @abstractmethod
@@ -34,16 +38,25 @@ class Optimizer(ABC):
         self,
         cost_fn: Callable[[np.ndarray], float],
         initial_params: np.ndarray,
-        callback_fn: Callable | None = None,
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
         **kwargs,
     ) -> OptimizeResult:
-        """
-        Optimize the given cost function starting from initial parameters.
+        """Optimize the given cost function starting from initial parameters.
 
         Parameters:
             cost_fn: The cost function to minimize.
             initial_params: Initial parameters for the optimization.
-            **kwargs: Additional keyword arguments for the optimizer
+            **kwargs: Additional keyword arguments for the optimizer:
+
+                - maxiter (int, optional): Maximum number of iterations.
+                  Defaults vary by optimizer (e.g., 5 for population-based optimizers,
+                  None for some scipy methods).
+                - rng (np.random.Generator, optional): Random number generator for
+                  stochastic optimizers (PymooOptimizer, MonteCarloOptimizer).
+                  Defaults to a new generator if not provided.
+                - jac (Callable, optional): Gradient/Jacobian function for
+                  gradient-based optimizers (only used by ScipyOptimizer with
+                  L_BFGS_B method). Defaults to None.
 
         Returns:
             Optimized parameters.
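
The expanded `**kwargs` documentation above fixes a single calling convention shared by every optimizer in this module. A minimal sketch of a cost function and callback that fit this convention (the names `batched_cost` and `report` are illustrative, not part of the package):

```python
import numpy as np
from scipy.optimize import OptimizeResult

def batched_cost(params: np.ndarray) -> np.ndarray:
    # One parameter set per row in, one loss per row out; the population-based
    # optimizers in this module score the whole population in a single call.
    return np.sum((np.atleast_2d(params) - np.pi) ** 2, axis=-1)

def report(result: OptimizeResult) -> None:
    # callback_fn receives an OptimizeResult after each iteration.
    print("losses:", np.atleast_1d(result.fun))

# A concrete Optimizer subclass is then driven via the shared kwargs, e.g.:
# optimizer.optimize(batched_cost, initial_params, callback_fn=report,
#                    maxiter=10, rng=np.random.default_rng(7))
```
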
@@ -51,13 +64,53 @@
         raise NotImplementedError("This method should be implemented by subclasses.")
 
 
-class
-
+class PymooMethod(Enum):
+    """Supported optimization methods from the pymoo library."""
+
+    CMAES = "CMAES"
+    DE = "DE"
+
+
+class PymooOptimizer(Optimizer):
+    """
+    Optimizer wrapper for pymoo optimization algorithms.
+
+    Supports population-based optimization methods from the pymoo library,
+    including CMAES (Covariance Matrix Adaptation Evolution Strategy) and
+    DE (Differential Evolution).
+    """
+
+    def __init__(self, method: PymooMethod, population_size: int = 50, **kwargs):
+        """
+        Initialize a pymoo-based optimizer.
+
+        Args:
+            method (PymooMethod): The optimization algorithm to use (CMAES or DE).
+            population_size (int, optional): Size of the population for the algorithm.
+                Defaults to 50.
+            **kwargs: Additional algorithm-specific parameters passed to pymoo.
+        """
+        super().__init__()
+
         self.method = method
+        self.population_size = population_size
+        self.algorithm_kwargs = kwargs
 
     @property
     def n_param_sets(self):
-
+        """
+        Get the number of parameter sets (population size) used by this optimizer.
+
+        Returns:
+            int: Population size for the optimization algorithm.
+        """
+        # Determine population size from stored parameters
+        if self.method.value == "DE":
+            return self.population_size
+        elif self.method.value == "CMAES":
+            # CMAES uses 'popsize' in options dict
+            return self.algorithm_kwargs.get("popsize", self.population_size)
+        return self.population_size
 
     def optimize(
         self,
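
Based on the constructor and `n_param_sets` property added above, configuring a `PymooOptimizer` would look roughly as follows (a sketch; the population sizes are illustrative and the import path assumes the module shown above):

```python
from divi.qprog.optimizers import PymooMethod, PymooOptimizer

# Differential Evolution with an explicit population size.
de_opt = PymooOptimizer(PymooMethod.DE, population_size=20)
assert de_opt.n_param_sets == 20

# For CMAES, a pymoo-specific 'popsize' keyword (forwarded through **kwargs)
# takes precedence in n_param_sets, per the property above.
cmaes_opt = PymooOptimizer(PymooMethod.CMAES, population_size=20, popsize=8)
assert cmaes_opt.n_param_sets == 8
```
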
@@ -66,8 +119,157 @@ class ScipyOptimizer(Optimizer):
         callback_fn: Callable | None = None,
         **kwargs,
     ):
+        """
+        Run the pymoo optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 2D array of
+                parameter sets and return an array of cost values.
+            initial_params (np.ndarray): Initial parameter values as a 2D array
+                of shape (n_param_sets, n_params).
+            callback_fn (Callable, optional): Function called after each iteration
+                with an OptimizeResult object. Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - rng (np.random.Generator): Random number generator
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
+
+        # Create fresh algorithm instance for this optimization run
+        # since pymoo has no reset()-like functionality
+        optimizer_obj = globals()[self.method.value](
+            pop_size=self.population_size, parallelize=False, **self.algorithm_kwargs
+        )
+
+        max_iterations = kwargs.pop("maxiter", 5)
+        rng = kwargs.pop("rng", np.random.default_rng())
+        seed = rng.bit_generator.seed_seq.spawn(1)[0].generate_state(1)[0]
+
+        n_var = initial_params.shape[-1]
+
+        xl = np.zeros(n_var)
+        xu = np.ones(n_var) * 2 * np.pi
+
+        problem = Problem(n_var=n_var, n_obj=1, xl=xl, xu=xu)
+
+        optimizer_obj.setup(
+            problem,
+            termination=get_termination("n_gen", max_iterations),
+            seed=int(seed),
+            verbose=False,
+        )
+        optimizer_obj.start_time = time.time()
+
+        pop = Population.create(
+            *[Individual(X=initial_params[i]) for i in range(self.n_param_sets)]
+        )
+
+        while optimizer_obj.has_next():
+            evaluated_X = pop.get("X")
+
+            curr_losses = cost_fn(evaluated_X)
+            static = StaticProblem(problem, F=curr_losses)
+            Evaluator().eval(static, pop)
+
+            optimizer_obj.tell(infills=pop)
+
+            pop = optimizer_obj.ask()
+
+            if callback_fn:
+                callback_fn(OptimizeResult(x=evaluated_X, fun=curr_losses))
+
+        result = optimizer_obj.result()
+
+        return OptimizeResult(
+            x=result.X,
+            fun=result.F,
+            nit=optimizer_obj.n_gen - 1,
+        )
+
+
+class ScipyMethod(Enum):
+    """Supported optimization methods from scipy.optimize."""
+
+    NELDER_MEAD = "Nelder-Mead"
+    COBYLA = "COBYLA"
+    L_BFGS_B = "L-BFGS-B"
+
+
+class ScipyOptimizer(Optimizer):
+    """
+    Optimizer wrapper for scipy.optimize methods.
+
+    Supports gradient-free and gradient-based optimization algorithms from scipy,
+    including Nelder-Mead simplex, COBYLA, and L-BFGS-B.
+    """
+
+    def __init__(self, method: ScipyMethod):
+        """
+        Initialize a scipy-based optimizer.
+
+        Args:
+            method (ScipyMethod): The optimization algorithm to use.
+        """
+        super().__init__()
+
+        self.method = method
+
+    @property
+    def n_param_sets(self) -> int:
+        """
+        Get the number of parameter sets used by this optimizer.
+
+        Returns:
+            int: Always returns 1, as scipy optimizers use single-point optimization.
+        """
+        return 1
+
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
+        **kwargs,
+    ) -> OptimizeResult:
+        """
+        Run the scipy optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 1D array of
+                parameters and return a scalar cost value.
+            initial_params (np.ndarray): Initial parameter values as a 1D or 2D array.
+                If 2D with shape (1, n_params), it will be squeezed to 1D.
+            callback_fn (Callable, optional): Function called after each iteration
+                with an `OptimizeResult` object. Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - jac (Callable): Gradient function (only used for L-BFGS-B)
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
         max_iterations = kwargs.pop("maxiter", None)
 
+        # If a callback is provided, we wrap the cost function and callback
+        # to ensure the data passed to the callback has a consistent shape.
+        if callback_fn:
+
+            def callback_wrapper(intermediate_result: OptimizeResult):
+                # Create a dictionary from the intermediate result to preserve all of its keys.
+                result_dict = dict(intermediate_result)
+
+                # Overwrite 'x' and 'fun' to ensure they have consistent dimensions.
+                result_dict["x"] = np.atleast_2d(intermediate_result.x)
+                result_dict["fun"] = np.atleast_1d(intermediate_result.fun)
+
+                # Create a new OptimizeResult and pass it to the user's callback.
+                return callback_fn(OptimizeResult(**result_dict))
+
+        else:
+            callback_wrapper = None
+
         if max_iterations is None or self.method == ScipyMethod.COBYLA:
             # COBYLA perceive maxiter as maxfev so we need
             # to use the callback fn for counting instead.
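
The new `PymooOptimizer.optimize` drives pymoo through its ask-and-tell interface so that the whole population is scored by a single batched call to `cost_fn`. Stripped of the Divi wrapper, the same pattern looks roughly like this (a self-contained sketch; the `sphere` cost and the sizes are illustrative):

```python
import numpy as np
from pymoo.algorithms.soo.nonconvex.de import DE
from pymoo.core.evaluator import Evaluator
from pymoo.core.problem import Problem
from pymoo.problems.static import StaticProblem
from pymoo.termination import get_termination

def sphere(x: np.ndarray) -> np.ndarray:
    # Batched cost: one candidate per row, one loss per row.
    return np.sum((x - np.pi) ** 2, axis=-1)

n_var = 4
problem = Problem(n_var=n_var, n_obj=1, xl=np.zeros(n_var), xu=2 * np.pi * np.ones(n_var))

algorithm = DE(pop_size=10)
algorithm.setup(problem, termination=get_termination("n_gen", 5), seed=1, verbose=False)

while algorithm.has_next():
    pop = algorithm.ask()                      # candidate parameter sets
    losses = sphere(pop.get("X"))              # evaluate them externally, in one batch
    Evaluator().eval(StaticProblem(problem, F=losses), pop)
    algorithm.tell(infills=pop)                # hand the evaluated population back

res = algorithm.result()
print(res.X, res.F)
```
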
@@ -89,35 +291,97 @@ class ScipyOptimizer(Optimizer):
             jac=(
                 kwargs.pop("jac", None) if self.method == ScipyMethod.L_BFGS_B else None
             ),
-            callback=
+            callback=callback_wrapper,
             options={"maxiter": maxiter},
         )
 
 
 class MonteCarloOptimizer(Optimizer):
-
+    """
+    Monte Carlo-based parameter search optimizer.
+
+    This optimizer samples parameter space randomly, selects the best-performing
+    samples, and uses them as centers for the next generation of samples with
+    decreasing variance. This implements a simple but effective evolutionary strategy.
+    """
+
+    def __init__(
+        self,
+        population_size: int = 10,
+        n_best_sets: int = 3,
+        keep_best_params: bool = False,
+    ):
+        """
+        Initialize a Monte Carlo optimizer.
+
+        Args:
+            population_size (int, optional): Size of the population for the algorithm.
+                Defaults to 10.
+            n_best_sets (int, optional): Number of top-performing parameter sets to
+                use as seeds for the next generation. Defaults to 3.
+            keep_best_params (bool, optional): If True, includes the best parameter sets
+                directly in the new population. If False, generates all new parameters
+                by sampling around the best ones. Defaults to False.
+
+        Raises:
+            ValueError: If n_best_sets is greater than population_size.
+            ValueError: If keep_best_params is True and n_best_sets equals population_size.
+        """
         super().__init__()
 
-        if n_best_sets >
-            raise ValueError(
+        if n_best_sets > population_size:
+            raise ValueError(
+                "n_best_sets must be less than or equal to population_size."
+            )
+
+        if keep_best_params and n_best_sets == population_size:
+            raise ValueError(
+                "If keep_best_params is True, n_best_sets must be less than population_size."
+            )
 
-        self.
+        self._population_size = population_size
         self._n_best_sets = n_best_sets
+        self._keep_best_params = keep_best_params
 
-
-
-
-
-
+    @property
+    def population_size(self) -> int:
+        """
+        Get the size of the population.
+
+        Returns:
+            int: Size of the population.
+        """
+        return self._population_size
 
     @property
-    def n_param_sets(self):
-
+    def n_param_sets(self) -> int:
+        """Number of parameter sets (population size), per the Optimizer interface.
+
+        Returns:
+            int: The population size.
+        """
+        return self._population_size
 
     @property
-    def n_best_sets(self):
+    def n_best_sets(self) -> int:
+        """
+        Get the number of best parameter sets used for seeding the next generation.
+
+        Returns:
+            int: Number of best-performing sets kept.
+        """
         return self._n_best_sets
 
+    @property
+    def keep_best_params(self) -> bool:
+        """
+        Get whether the best parameters are kept in the new population.
+
+        Returns:
+            bool: True if best parameters are included in new population, False otherwise.
+        """
+        return self._keep_best_params
+
     def _compute_new_parameters(
         self,
         params: np.ndarray,
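
The reworked `MonteCarloOptimizer` constructor validates `n_best_sets` against `population_size` up front. A short sketch of the resulting behaviour (the values and import path are illustrative):

```python
from divi.qprog.optimizers import MonteCarloOptimizer

opt = MonteCarloOptimizer(population_size=10, n_best_sets=3, keep_best_params=True)
assert opt.n_param_sets == 10 and opt.n_best_sets == 3 and opt.keep_best_params

# n_best_sets may not exceed population_size ...
try:
    MonteCarloOptimizer(population_size=5, n_best_sets=8)
except ValueError as err:
    print(err)

# ... and must be strictly smaller when keep_best_params is requested.
try:
    MonteCarloOptimizer(population_size=5, n_best_sets=5, keep_best_params=True)
except ValueError as err:
    print(err)
```
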
@@ -132,34 +396,57 @@ class MonteCarloOptimizer(Optimizer):
         # 1. Select the best parameter sets from the current population
         best_params = params[best_indices]
 
-        # 2.
-
-
+        # 2. Determine how many new samples to generate and calculate repeat counts
+        if self._keep_best_params:
+            n_new_samples = self._population_size - self._n_best_sets
+            # Calculate repeat counts for new samples only
+            samples_per_best = n_new_samples // self._n_best_sets
+            remainder = n_new_samples % self._n_best_sets
+        else:
+            # Calculate repeat counts for the entire population
+            samples_per_best = self._population_size // self._n_best_sets
+            remainder = self._population_size % self._n_best_sets
+
+        repeat_counts = np.full(self._n_best_sets, samples_per_best)
+        repeat_counts[:remainder] += 1
+
+        # 3. Prepare the means for sampling by repeating each best parameter set
+        new_means = np.repeat(best_params, repeat_counts, axis=0)
 
-        #
+        # 4. Define the standard deviation (scale), which shrinks over iterations
         scale = 1.0 / (2.0 * (curr_iteration + 1.0))
 
-        #
+        # 5. Generate new parameters by sampling around the best ones
         new_params = rng.normal(loc=new_means, scale=scale)
 
-        # Apply periodic boundary conditions
-
+        # 6. Apply periodic boundary conditions
+        new_params = new_params % (2 * np.pi)
+
+        # 7. Conditionally combine with best params if keeping them
+        if self._keep_best_params:
+            return np.vstack([best_params, new_params])
+        else:
+            return new_params
 
     def optimize(
         self,
         cost_fn: Callable[[np.ndarray], float],
         initial_params: np.ndarray,
-        callback_fn: Callable[[OptimizeResult],
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
         **kwargs,
     ) -> OptimizeResult:
-        """
-        Perform Monte Carlo optimization on the cost function.
+        """Perform Monte Carlo optimization on the cost function.
 
         Parameters:
             cost_fn: The cost function to minimize.
             initial_params: Initial parameters for the optimization.
             callback_fn: Optional callback function to monitor progress.
-            **kwargs: Additional keyword arguments
+            **kwargs: Additional keyword arguments:
+
+                - maxiter (int, optional): Maximum number of iterations. Defaults to 5.
+                - rng (np.random.Generator, optional): Random number generator for
+                  parameter sampling. Defaults to a new generator if not provided.
+
         Returns:
             Optimized parameters.
         """
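
The arithmetic in `_compute_new_parameters` spreads the new samples as evenly as possible across the best-performing sets, perturbs them with a standard deviation that shrinks each iteration, and wraps the result back into [0, 2π). A standalone sketch of that resampling step (array sizes are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
population_size, n_best_sets, curr_iteration = 10, 3, 1
best_params = rng.uniform(0, 2 * np.pi, (n_best_sets, 4))  # stand-in for the selected best sets

# Spread population_size new samples over n_best_sets centres (10 -> 4, 3, 3).
samples_per_best = population_size // n_best_sets
remainder = population_size % n_best_sets
repeat_counts = np.full(n_best_sets, samples_per_best)
repeat_counts[:remainder] += 1

new_means = np.repeat(best_params, repeat_counts, axis=0)           # shape (10, 4)
scale = 1.0 / (2.0 * (curr_iteration + 1.0))                        # shrinks with iterations
new_params = rng.normal(loc=new_means, scale=scale) % (2 * np.pi)   # wrap to [0, 2*pi)
print(new_params.shape)  # (10, 4)
```
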
@@ -167,34 +454,32 @@ class MonteCarloOptimizer(Optimizer):
         max_iterations = kwargs.pop("maxiter", 5)
 
         population = np.copy(initial_params)
-
-        final_params = None
-        final_losses = None
+        evaluated_population = population
 
         for curr_iter in range(max_iterations):
             # Evaluate the entire population once
             losses = cost_fn(population)
+            evaluated_population = population
+
+            if callback_fn:
+                callback_fn(OptimizeResult(x=evaluated_population, fun=losses))
 
-            # Find the indices of the best-performing parameter sets
+            # Find the indices of the best-performing parameter sets
             best_indices = np.argpartition(losses, self.n_best_sets - 1)[
                 : self.n_best_sets
             ]
 
-            # Store the current best results
-            final_params = population[best_indices]
-            final_losses = losses[best_indices]
-
-            if callback_fn:
-                callback_fn(OptimizeResult(x=final_params, fun=final_losses))
-
             # Generate the next generation of parameters
             population = self._compute_new_parameters(
-
+                evaluated_population, curr_iter, best_indices, rng
            )
 
+        # Note: 'losses' here are from the last successfully evaluated population
+        best_idx = np.argmin(losses)
+
         # Return the best results from the LAST EVALUATED population
         return OptimizeResult(
-            x=
-            fun=
+            x=evaluated_population[best_idx],
+            fun=losses[best_idx],
             nit=max_iterations,
         )
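
With the reworked loop, every evaluated population is reported through `callback_fn` and the single best parameter set from the last evaluated population is returned. A usage sketch (the cost function, sizes, and import path are illustrative assumptions):

```python
import numpy as np
from divi.qprog.optimizers import MonteCarloOptimizer

def batched_cost(params: np.ndarray) -> np.ndarray:
    # One parameter set per row, one loss per row.
    return np.sum(np.cos(params), axis=-1)

opt = MonteCarloOptimizer(population_size=8, n_best_sets=2)
rng = np.random.default_rng(42)
initial = rng.uniform(0, 2 * np.pi, (opt.n_param_sets, 6))

result = opt.optimize(
    cost_fn=batched_cost,
    initial_params=initial,
    callback_fn=lambda res: print("iteration best:", res.fun.min()),
    maxiter=5,
    rng=rng,
)
print(result.x.shape, result.fun)  # best parameter set from the last evaluated population
```
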