qoro-divi 0.3.5__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of qoro-divi might be problematic.

divi/qprog/optimizers.py CHANGED
@@ -6,6 +6,7 @@ import time
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from enum import Enum
+from typing import Any

 import numpy as np
 from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
@@ -37,16 +38,25 @@ class Optimizer(ABC):
         self,
         cost_fn: Callable[[np.ndarray], float],
         initial_params: np.ndarray,
-        callback_fn: Callable | None = None,
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
         **kwargs,
     ) -> OptimizeResult:
-        """
-        Optimize the given cost function starting from initial parameters.
+        """Optimize the given cost function starting from initial parameters.

         Parameters:
             cost_fn: The cost function to minimize.
             initial_params: Initial parameters for the optimization.
-            **kwargs: Additional keyword arguments for the optimizer.
+            **kwargs: Additional keyword arguments for the optimizer:
+
+                - maxiter (int, optional): Maximum number of iterations.
+                  Defaults vary by optimizer (e.g., 5 for population-based optimizers,
+                  None for some scipy methods).
+                - rng (np.random.Generator, optional): Random number generator for
+                  stochastic optimizers (PymooOptimizer, MonteCarloOptimizer).
+                  Defaults to a new generator if not provided.
+                - jac (Callable, optional): Gradient/Jacobian function for
+                  gradient-based optimizers (only used by ScipyOptimizer with
+                  L_BFGS_B method). Defaults to None.

         Returns:
             Optimized parameters.
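
Editor's note: for illustration, a callback matching the annotated type Callable[[OptimizeResult], Any] only needs to accept the result object. The sketch below is an assumption-laden example, not code from the package (the OptimizeResult import path and the commented call are placeholders):

import numpy as np
from scipy.optimize import OptimizeResult  # assumed source of OptimizeResult

loss_history: list[float] = []

def record_losses(result: OptimizeResult) -> None:
    # `result.x` holds the evaluated parameter sets, `result.fun` the corresponding losses.
    loss_history.append(float(np.min(result.fun)))

# A concrete Optimizer subclass would then be driven roughly like this (names are placeholders):
# optimizer.optimize(cost_fn, initial_params, callback_fn=record_losses,
#                    maxiter=5, rng=np.random.default_rng(0))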
@@ -157,9 +167,9 @@ class PymooOptimizer(Optimizer):
         )

         while optimizer_obj.has_next():
-            X = pop.get("X")
+            evaluated_X = pop.get("X")

-            curr_losses = cost_fn(X)
+            curr_losses = cost_fn(evaluated_X)
             static = StaticProblem(problem, F=curr_losses)
             Evaluator().eval(static, pop)

@@ -168,7 +178,7 @@ class PymooOptimizer(Optimizer):
             pop = optimizer_obj.ask()

             if callback_fn:
-                callback_fn(OptimizeResult(x=pop.get("X"), fun=curr_losses))
+                callback_fn(OptimizeResult(x=evaluated_X, fun=curr_losses))

         result = optimizer_obj.result()

@@ -207,7 +217,7 @@ class ScipyOptimizer(Optimizer):
         self.method = method

     @property
-    def n_param_sets(self):
+    def n_param_sets(self) -> int:
         """
         Get the number of parameter sets used by this optimizer.

@@ -220,9 +230,9 @@ class ScipyOptimizer(Optimizer):
         self,
         cost_fn: Callable[[np.ndarray], float],
         initial_params: np.ndarray,
-        callback_fn: Callable | None = None,
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
         **kwargs,
-    ):
+    ) -> OptimizeResult:
         """
         Run the scipy optimization algorithm.

@@ -231,8 +241,8 @@ class ScipyOptimizer(Optimizer):
                 parameters and return a scalar cost value.
             initial_params (np.ndarray): Initial parameter values as a 1D or 2D array.
                 If 2D with shape (1, n_params), it will be squeezed to 1D.
-            callback_fn (Callable, optional): Function called after each iteration.
-                Defaults to None.
+            callback_fn (Callable, optional): Function called after each iteration
+                with an `OptimizeResult` object. Defaults to None.
             **kwargs: Additional keyword arguments:
                 - maxiter (int): Maximum number of iterations
                 - jac (Callable): Gradient function (only used for L-BFGS-B)
@@ -242,6 +252,24 @@ class ScipyOptimizer(Optimizer):
         """
         max_iterations = kwargs.pop("maxiter", None)

+        # If a callback is provided, we wrap the cost function and callback
+        # to ensure the data passed to the callback has a consistent shape.
+        if callback_fn:
+
+            def callback_wrapper(intermediate_result: OptimizeResult):
+                # Create a dictionary from the intermediate result to preserve all of its keys.
+                result_dict = dict(intermediate_result)
+
+                # Overwrite 'x' and 'fun' to ensure they have consistent dimensions.
+                result_dict["x"] = np.atleast_2d(intermediate_result.x)
+                result_dict["fun"] = np.atleast_1d(intermediate_result.fun)
+
+                # Create a new OptimizeResult and pass it to the user's callback.
+                return callback_fn(OptimizeResult(**result_dict))
+
+        else:
+            callback_wrapper = None
+
         if max_iterations is None or self.method == ScipyMethod.COBYLA:
             # COBYLA perceive maxiter as maxfev so we need
             # to use the callback fn for counting instead.
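
Editor's note: the wrapper's shape normalization can be checked in isolation. The standalone sketch below uses made-up values and assumes scipy.optimize.OptimizeResult; the module's own OptimizeResult import is outside this hunk:

import numpy as np
from scipy.optimize import OptimizeResult  # assumption; the module's import is not shown in this diff

# SciPy passes an intermediate result whose x is 1D and whose fun is a scalar.
intermediate = OptimizeResult(x=np.array([0.1, 0.2, 0.3]), fun=1.23)

result_dict = dict(intermediate)
result_dict["x"] = np.atleast_2d(intermediate.x)      # (3,)   -> (1, 3)
result_dict["fun"] = np.atleast_1d(intermediate.fun)  # scalar -> (1,)

normalized = OptimizeResult(**result_dict)
print(normalized.x.shape, normalized.fun.shape)  # (1, 3) (1,)

With this convention, the same callback can be reused across ScipyOptimizer and the population-based optimizers, which already report batches of parameter sets.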
@@ -263,7 +291,7 @@ class ScipyOptimizer(Optimizer):
             jac=(
                 kwargs.pop("jac", None) if self.method == ScipyMethod.L_BFGS_B else None
             ),
-            callback=callback_fn,
+            callback=callback_wrapper,
             options={"maxiter": maxiter},
         )

@@ -277,45 +305,65 @@ class MonteCarloOptimizer(Optimizer):
     decreasing variance. This implements a simple but effective evolutionary strategy.
     """

-    def __init__(self, n_param_sets: int = 10, n_best_sets: int = 3):
+    def __init__(
+        self,
+        population_size: int = 10,
+        n_best_sets: int = 3,
+        keep_best_params: bool = False,
+    ):
         """
         Initialize a Monte Carlo optimizer.

         Args:
-            n_param_sets (int, optional): Total number of parameter sets to evaluate
-                per iteration. Defaults to 10.
+            population_size (int, optional): Size of the population for the algorithm.
+                Defaults to 10.
             n_best_sets (int, optional): Number of top-performing parameter sets to
                 use as seeds for the next generation. Defaults to 3.
+            keep_best_params (bool, optional): If True, includes the best parameter sets
+                directly in the new population. If False, generates all new parameters
+                by sampling around the best ones. Defaults to False.

         Raises:
-            ValueError: If n_best_sets is greater than n_param_sets.
+            ValueError: If n_best_sets is greater than population_size.
+            ValueError: If keep_best_params is True and n_best_sets equals population_size.
         """
         super().__init__()

-        if n_best_sets > n_param_sets:
-            raise ValueError("n_best_sets must be less than or equal to n_param_sets.")
+        if n_best_sets > population_size:
+            raise ValueError(
+                "n_best_sets must be less than or equal to population_size."
+            )

-        self._n_param_sets = n_param_sets
-        self._n_best_sets = n_best_sets
+        if keep_best_params and n_best_sets == population_size:
+            raise ValueError(
+                "If keep_best_params is True, n_best_sets must be less than population_size."
+            )

-        # Calculate how many times each of the best sets should be repeated
-        samples_per_best = self.n_param_sets // self.n_best_sets
-        remainder = self.n_param_sets % self.n_best_sets
-        self._repeat_counts = np.full(self.n_best_sets, samples_per_best)
-        self._repeat_counts[:remainder] += 1
+        self._population_size = population_size
+        self._n_best_sets = n_best_sets
+        self._keep_best_params = keep_best_params

     @property
-    def n_param_sets(self):
+    def population_size(self) -> int:
+        """
+        Get the size of the population.
+
+        Returns:
+            int: Size of the population.
         """
-        Get the number of parameter sets evaluated per iteration.
+        return self._population_size
+
+    @property
+    def n_param_sets(self) -> int:
+        """Number of parameter sets (population size), per the Optimizer interface.

         Returns:
-            int: Total number of parameter sets.
+            int: The population size.
         """
-        return self._n_param_sets
+        return self._population_size

     @property
-    def n_best_sets(self):
+    def n_best_sets(self) -> int:
         """
         Get the number of best parameter sets used for seeding the next generation.

@@ -324,6 +372,16 @@ class MonteCarloOptimizer(Optimizer):
         """
         return self._n_best_sets

+    @property
+    def keep_best_params(self) -> bool:
+        """
+        Get whether the best parameters are kept in the new population.
+
+        Returns:
+            bool: True if best parameters are included in new population, False otherwise.
+        """
+        return self._keep_best_params
+
     def _compute_new_parameters(
         self,
         params: np.ndarray,
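
Editor's note: a short usage sketch of the constructor contract and the new keep_best_params flag documented above; the import path is an assumption based on the file location, not confirmed by this diff:

from divi.qprog.optimizers import MonteCarloOptimizer  # assumed import path

# Valid: 3 seed sets are carried over unchanged, 7 sets are freshly sampled around them.
opt = MonteCarloOptimizer(population_size=10, n_best_sets=3, keep_best_params=True)

# Both of the following should raise ValueError per the documented checks:
# MonteCarloOptimizer(population_size=5, n_best_sets=6)
# MonteCarloOptimizer(population_size=5, n_best_sets=5, keep_best_params=True)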
@@ -338,34 +396,57 @@ class MonteCarloOptimizer(Optimizer):
         # 1. Select the best parameter sets from the current population
         best_params = params[best_indices]

-        # 2. Prepare the means for sampling by repeating each best parameter set
-        # according to its assigned count
-        new_means = np.repeat(best_params, self._repeat_counts, axis=0)
+        # 2. Determine how many new samples to generate and calculate repeat counts
+        if self._keep_best_params:
+            n_new_samples = self._population_size - self._n_best_sets
+            # Calculate repeat counts for new samples only
+            samples_per_best = n_new_samples // self._n_best_sets
+            remainder = n_new_samples % self._n_best_sets
+        else:
+            # Calculate repeat counts for the entire population
+            samples_per_best = self._population_size // self._n_best_sets
+            remainder = self._population_size % self._n_best_sets
+
+        repeat_counts = np.full(self._n_best_sets, samples_per_best)
+        repeat_counts[:remainder] += 1
+
+        # 3. Prepare the means for sampling by repeating each best parameter set
+        new_means = np.repeat(best_params, repeat_counts, axis=0)

-        # 3. Define the standard deviation (scale), which shrinks over iterations
+        # 4. Define the standard deviation (scale), which shrinks over iterations
         scale = 1.0 / (2.0 * (curr_iteration + 1.0))

-        # 4. Generate all new parameters in a single vectorized call
+        # 5. Generate new parameters by sampling around the best ones
         new_params = rng.normal(loc=new_means, scale=scale)

-        # Apply periodic boundary conditions
-        return new_params % (2 * np.pi)
+        # 6. Apply periodic boundary conditions
+        new_params = new_params % (2 * np.pi)
+
+        # 7. Conditionally combine with best params if keeping them
+        if self._keep_best_params:
+            return np.vstack([best_params, new_params])
+        else:
+            return new_params

     def optimize(
         self,
         cost_fn: Callable[[np.ndarray], float],
         initial_params: np.ndarray,
-        callback_fn: Callable[[OptimizeResult], float | np.ndarray] | None = None,
+        callback_fn: Callable[[OptimizeResult], Any] | None = None,
         **kwargs,
     ) -> OptimizeResult:
-        """
-        Perform Monte Carlo optimization on the cost function.
+        """Perform Monte Carlo optimization on the cost function.

         Parameters:
             cost_fn: The cost function to minimize.
             initial_params: Initial parameters for the optimization.
             callback_fn: Optional callback function to monitor progress.
-            **kwargs: Additional keyword arguments for the optimizer.
+            **kwargs: Additional keyword arguments:
+
+                - maxiter (int, optional): Maximum number of iterations. Defaults to 5.
+                - rng (np.random.Generator, optional): Random number generator for
+                  parameter sampling. Defaults to a new generator if not provided.
+
         Returns:
             Optimized parameters.
         """
@@ -373,30 +454,32 @@ class MonteCarloOptimizer(Optimizer):
         max_iterations = kwargs.pop("maxiter", 5)

         population = np.copy(initial_params)
+        evaluated_population = population

         for curr_iter in range(max_iterations):
             # Evaluate the entire population once
             losses = cost_fn(population)
+            evaluated_population = population

-            # Find the indices of the best-performing parameter sets (only once)
+            if callback_fn:
+                callback_fn(OptimizeResult(x=evaluated_population, fun=losses))
+
+            # Find the indices of the best-performing parameter sets
             best_indices = np.argpartition(losses, self.n_best_sets - 1)[
                 : self.n_best_sets
             ]

-            if callback_fn:
-                callback_fn(
-                    OptimizeResult(x=population[best_indices], fun=losses[best_indices])
-                )
-
             # Generate the next generation of parameters
             population = self._compute_new_parameters(
-                population, curr_iter, best_indices, rng
+                evaluated_population, curr_iter, best_indices, rng
            )

+        # Note: 'losses' here are from the last successfully evaluated population
         best_idx = np.argmin(losses)
+
         # Return the best results from the LAST EVALUATED population
         return OptimizeResult(
-            x=population[best_idx],
+            x=evaluated_population[best_idx],
             fun=losses[best_idx],
             nit=max_iterations,
         )
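
Editor's note: an end-to-end sketch of the updated loop under stated assumptions (the cost function, shapes, seed, and import path are illustrative only, and the rng/maxiter kwargs are taken from the docstring). The callback now receives the full evaluated population each iteration, and the returned x and fun come from the last evaluated population rather than the freshly proposed one:

import numpy as np
from divi.qprog.optimizers import MonteCarloOptimizer  # assumed import path

def cost_fn(population: np.ndarray) -> np.ndarray:
    # One loss per parameter set (row), as the optimizer expects.
    return np.sum((population - np.pi) ** 2, axis=1)

rng = np.random.default_rng(42)
initial = rng.uniform(0.0, 2.0 * np.pi, size=(10, 4))  # (population_size, n_params)

history = []
opt = MonteCarloOptimizer(population_size=10, n_best_sets=3, keep_best_params=True)
result = opt.optimize(
    cost_fn,
    initial,
    callback_fn=lambda r: history.append(float(r.fun.min())),  # per-iteration population losses
    maxiter=5,
    rng=rng,
)

print(result.x.shape, float(result.fun))  # best row of the last evaluated population and its loss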