qoro-divi 0.3.4__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of qoro-divi might be problematic.

divi/qprog/optimizers.py CHANGED
@@ -2,22 +2,25 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import time
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from enum import Enum
 
 import numpy as np
+from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
+from pymoo.algorithms.soo.nonconvex.de import DE
+from pymoo.core.evaluator import Evaluator
+from pymoo.core.individual import Individual
+from pymoo.core.population import Population
+from pymoo.core.problem import Problem
+from pymoo.problems.static import StaticProblem
+from pymoo.termination import get_termination
 from scipy.optimize import OptimizeResult, minimize
 
 from divi.extern.scipy._cobyla import _minimize_cobyla as cobyla_fn
 
 
-class ScipyMethod(Enum):
-    NELDER_MEAD = "Nelder-Mead"
-    COBYLA = "COBYLA"
-    L_BFGS_B = "L-BFGS-B"
-
-
 class Optimizer(ABC):
     @property
     @abstractmethod
@@ -51,12 +54,166 @@ class Optimizer(ABC):
         raise NotImplementedError("This method should be implemented by subclasses.")
 
 
+class PymooMethod(Enum):
+    """Supported optimization methods from the pymoo library."""
+
+    CMAES = "CMAES"
+    DE = "DE"
+
+
+class PymooOptimizer(Optimizer):
+    """
+    Optimizer wrapper for pymoo optimization algorithms.
+
+    Supports population-based optimization methods from the pymoo library,
+    including CMAES (Covariance Matrix Adaptation Evolution Strategy) and
+    DE (Differential Evolution).
+    """
+
+    def __init__(self, method: PymooMethod, population_size: int = 50, **kwargs):
+        """
+        Initialize a pymoo-based optimizer.
+
+        Args:
+            method (PymooMethod): The optimization algorithm to use (CMAES or DE).
+            population_size (int, optional): Size of the population for the algorithm.
+                Defaults to 50.
+            **kwargs: Additional algorithm-specific parameters passed to pymoo.
+        """
+        super().__init__()
+
+        self.method = method
+        self.population_size = population_size
+        self.algorithm_kwargs = kwargs
+
+    @property
+    def n_param_sets(self):
+        """
+        Get the number of parameter sets (population size) used by this optimizer.
+
+        Returns:
+            int: Population size for the optimization algorithm.
+        """
+        # Determine population size from stored parameters
+        if self.method.value == "DE":
+            return self.population_size
+        elif self.method.value == "CMAES":
+            # CMAES uses 'popsize' in options dict
+            return self.algorithm_kwargs.get("popsize", self.population_size)
+        return self.population_size
+
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable | None = None,
+        **kwargs,
+    ):
+        """
+        Run the pymoo optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 2D array of
+                parameter sets and return an array of cost values.
+            initial_params (np.ndarray): Initial parameter values as a 2D array
+                of shape (n_param_sets, n_params).
+            callback_fn (Callable, optional): Function called after each iteration
+                with an OptimizeResult object. Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - rng (np.random.Generator): Random number generator
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
+
+        # Create fresh algorithm instance for this optimization run
+        # since pymoo has no reset()-like functionality
+        optimizer_obj = globals()[self.method.value](
+            pop_size=self.population_size, parallelize=False, **self.algorithm_kwargs
+        )
+
+        max_iterations = kwargs.pop("maxiter", 5)
+        rng = kwargs.pop("rng", np.random.default_rng())
+        seed = rng.bit_generator.seed_seq.spawn(1)[0].generate_state(1)[0]
+
+        n_var = initial_params.shape[-1]
+
+        xl = np.zeros(n_var)
+        xu = np.ones(n_var) * 2 * np.pi
+
+        problem = Problem(n_var=n_var, n_obj=1, xl=xl, xu=xu)
+
+        optimizer_obj.setup(
+            problem,
+            termination=get_termination("n_gen", max_iterations),
+            seed=int(seed),
+            verbose=False,
+        )
+        optimizer_obj.start_time = time.time()
+
+        pop = Population.create(
+            *[Individual(X=initial_params[i]) for i in range(self.n_param_sets)]
+        )
+
+        while optimizer_obj.has_next():
+            X = pop.get("X")
+
+            curr_losses = cost_fn(X)
+            static = StaticProblem(problem, F=curr_losses)
+            Evaluator().eval(static, pop)
+
+            optimizer_obj.tell(infills=pop)
+
+            pop = optimizer_obj.ask()
+
+            if callback_fn:
+                callback_fn(OptimizeResult(x=pop.get("X"), fun=curr_losses))
+
+        result = optimizer_obj.result()
+
+        return OptimizeResult(
+            x=result.X,
+            fun=result.F,
+            nit=optimizer_obj.n_gen - 1,
+        )
+
+
+class ScipyMethod(Enum):
+    """Supported optimization methods from scipy.optimize."""
+
+    NELDER_MEAD = "Nelder-Mead"
+    COBYLA = "COBYLA"
+    L_BFGS_B = "L-BFGS-B"
+
+
 class ScipyOptimizer(Optimizer):
+    """
+    Optimizer wrapper for scipy.optimize methods.
+
+    Supports gradient-free and gradient-based optimization algorithms from scipy,
+    including Nelder-Mead simplex, COBYLA, and L-BFGS-B.
+    """
+
     def __init__(self, method: ScipyMethod):
+        """
+        Initialize a scipy-based optimizer.
+
+        Args:
+            method (ScipyMethod): The optimization algorithm to use.
+        """
+        super().__init__()
+
         self.method = method
 
     @property
     def n_param_sets(self):
+        """
+        Get the number of parameter sets used by this optimizer.
+
+        Returns:
+            int: Always returns 1, as scipy optimizers use single-point optimization.
+        """
         return 1
 
     def optimize(
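
The hunk above introduces the PymooOptimizer wrapper and documents its call pattern: a batched cost function over a 2D array of parameter sets, an initial population of shape (n_param_sets, n_params), and maxiter/rng forwarded through **kwargs. A minimal usage sketch under those assumptions follows; the quadratic cost function, parameter sizes, and seed are illustrative stand-ins, not part of the package.

import numpy as np

from divi.qprog.optimizers import PymooMethod, PymooOptimizer

def cost_fn(param_sets: np.ndarray) -> np.ndarray:
    # Batched cost: one value per row of the (n_param_sets, n_params) array.
    return np.sum((param_sets - np.pi) ** 2, axis=1)

optimizer = PymooOptimizer(PymooMethod.DE, population_size=10)

rng = np.random.default_rng(42)
initial_params = rng.uniform(0.0, 2 * np.pi, size=(optimizer.n_param_sets, 4))

result = optimizer.optimize(
    cost_fn,
    initial_params,
    callback_fn=lambda res: print(res.fun.min()),  # called once per generation
    maxiter=5,
    rng=rng,
)
print(result.x, result.fun)
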
@@ -66,6 +223,23 @@ class ScipyOptimizer(Optimizer):
         callback_fn: Callable | None = None,
         **kwargs,
     ):
+        """
+        Run the scipy optimization algorithm.
+
+        Args:
+            cost_fn (Callable): Function to minimize. Should accept a 1D array of
+                parameters and return a scalar cost value.
+            initial_params (np.ndarray): Initial parameter values as a 1D or 2D array.
+                If 2D with shape (1, n_params), it will be squeezed to 1D.
+            callback_fn (Callable, optional): Function called after each iteration.
+                Defaults to None.
+            **kwargs: Additional keyword arguments:
+                - maxiter (int): Maximum number of iterations
+                - jac (Callable): Gradient function (only used for L-BFGS-B)
+
+        Returns:
+            OptimizeResult: Optimization result with final parameters and cost value.
+        """
         max_iterations = kwargs.pop("maxiter", None)
 
         if max_iterations is None or self.method == ScipyMethod.COBYLA:
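
For the scipy-backed path, the new docstring describes a single-point interface instead: a scalar cost over a 1D parameter vector. A minimal sketch under the same caveats (the cost function and sizes below are illustrative only):

import numpy as np

from divi.qprog.optimizers import ScipyMethod, ScipyOptimizer

def cost_fn(params: np.ndarray) -> float:
    # Scalar cost for a single 1D parameter vector.
    return float(np.sum((params - np.pi) ** 2))

optimizer = ScipyOptimizer(ScipyMethod.COBYLA)
initial_params = np.zeros(4)  # 1D; a (1, n_params) array would be squeezed

result = optimizer.optimize(cost_fn, initial_params, maxiter=100)
print(result.x, result.fun)
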
@@ -95,7 +269,27 @@ class ScipyOptimizer(Optimizer):
 
 
 class MonteCarloOptimizer(Optimizer):
+    """
+    Monte Carlo-based parameter search optimizer.
+
+    This optimizer samples parameter space randomly, selects the best-performing
+    samples, and uses them as centers for the next generation of samples with
+    decreasing variance. This implements a simple but effective evolutionary strategy.
+    """
+
     def __init__(self, n_param_sets: int = 10, n_best_sets: int = 3):
+        """
+        Initialize a Monte Carlo optimizer.
+
+        Args:
+            n_param_sets (int, optional): Total number of parameter sets to evaluate
+                per iteration. Defaults to 10.
+            n_best_sets (int, optional): Number of top-performing parameter sets to
+                use as seeds for the next generation. Defaults to 3.
+
+        Raises:
+            ValueError: If n_best_sets is greater than n_param_sets.
+        """
         super().__init__()
 
         if n_best_sets > n_param_sets:
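
The class docstring in this hunk summarizes the search strategy: sample a population, keep the best few, and resample around them with a spread that shrinks over iterations. A toy NumPy illustration of that idea follows; it is not the package's _compute_new_parameters (whose update rule is not visible in this diff), just a sketch of the described strategy with a stand-in cost.

import numpy as np

rng = np.random.default_rng(0)
n_param_sets, n_best_sets, n_params = 10, 3, 4
sigma = 0.5

population = rng.uniform(0.0, 2 * np.pi, size=(n_param_sets, n_params))
for it in range(20):
    losses = np.sum((population - np.pi) ** 2, axis=1)   # stand-in cost
    best = population[np.argsort(losses)[:n_best_sets]]  # keep the best sets
    # Resample around the best sets with a spread that shrinks each iteration.
    centers = best[rng.integers(0, n_best_sets, size=n_param_sets)]
    population = centers + rng.normal(0.0, sigma / (it + 1), size=(n_param_sets, n_params))
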
@@ -112,10 +306,22 @@ class MonteCarloOptimizer(Optimizer):
 
     @property
     def n_param_sets(self):
+        """
+        Get the number of parameter sets evaluated per iteration.
+
+        Returns:
+            int: Total number of parameter sets.
+        """
         return self._n_param_sets
 
     @property
     def n_best_sets(self):
+        """
+        Get the number of best parameter sets used for seeding the next generation.
+
+        Returns:
+            int: Number of best-performing sets kept.
+        """
         return self._n_best_sets
 
     def _compute_new_parameters(
@@ -168,9 +374,6 @@ class MonteCarloOptimizer(Optimizer):
 
         population = np.copy(initial_params)
 
-        final_params = None
-        final_losses = None
-
        for curr_iter in range(max_iterations):
             # Evaluate the entire population once
             losses = cost_fn(population)
@@ -180,21 +383,20 @@ class MonteCarloOptimizer(Optimizer):
                 : self.n_best_sets
             ]
 
-            # Store the current best results
-            final_params = population[best_indices]
-            final_losses = losses[best_indices]
-
             if callback_fn:
-                callback_fn(OptimizeResult(x=final_params, fun=final_losses))
+                callback_fn(
+                    OptimizeResult(x=population[best_indices], fun=losses[best_indices])
+                )
 
             # Generate the next generation of parameters
             population = self._compute_new_parameters(
                 population, curr_iter, best_indices, rng
             )
 
+        best_idx = np.argmin(losses)
         # Return the best results from the LAST EVALUATED population
         return OptimizeResult(
-            x=final_params,
-            fun=final_losses,
+            x=population[best_idx],
+            fun=losses[best_idx],
             nit=max_iterations,
         )
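
Finally, MonteCarloOptimizer is driven like the other population-based wrapper: a batched cost function and an initial population of n_param_sets parameter vectors, with maxiter and rng assumed here (by analogy with the optimizers above) to be accepted as keyword arguments, since its optimize signature is not shown in this diff. Note the behavioral change in 0.3.5: the returned OptimizeResult now carries a single best parameter set and loss rather than the n_best_sets block returned previously. A minimal sketch with an illustrative cost function:

import numpy as np

from divi.qprog.optimizers import MonteCarloOptimizer

def cost_fn(param_sets: np.ndarray) -> np.ndarray:
    # One cost value per parameter set in the population.
    return np.sum((param_sets - np.pi) ** 2, axis=1)

optimizer = MonteCarloOptimizer(n_param_sets=10, n_best_sets=3)

rng = np.random.default_rng(7)
initial_params = rng.uniform(0.0, 2 * np.pi, size=(optimizer.n_param_sets, 4))

result = optimizer.optimize(cost_fn, initial_params, maxiter=20, rng=rng)
print(result.x, result.fun)  # single best parameter set and its loss
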