qoro-divi 0.2.0b1__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. divi/__init__.py +1 -2
  2. divi/backends/__init__.py +10 -0
  3. divi/backends/_backend_properties_conversion.py +227 -0
  4. divi/backends/_circuit_runner.py +70 -0
  5. divi/backends/_execution_result.py +70 -0
  6. divi/backends/_parallel_simulator.py +486 -0
  7. divi/backends/_qoro_service.py +663 -0
  8. divi/backends/_qpu_system.py +101 -0
  9. divi/backends/_results_processing.py +133 -0
  10. divi/circuits/__init__.py +13 -0
  11. divi/{exp/cirq → circuits/_cirq}/__init__.py +1 -2
  12. divi/circuits/_cirq/_parser.py +110 -0
  13. divi/circuits/_cirq/_qasm_export.py +78 -0
  14. divi/circuits/_core.py +391 -0
  15. divi/{qasm.py → circuits/_qasm_conversion.py} +73 -14
  16. divi/circuits/_qasm_validation.py +694 -0
  17. divi/qprog/__init__.py +27 -8
  18. divi/qprog/_expectation.py +181 -0
  19. divi/qprog/_hamiltonians.py +281 -0
  20. divi/qprog/algorithms/__init__.py +16 -0
  21. divi/qprog/algorithms/_ansatze.py +368 -0
  22. divi/qprog/algorithms/_custom_vqa.py +263 -0
  23. divi/qprog/algorithms/_pce.py +262 -0
  24. divi/qprog/algorithms/_qaoa.py +579 -0
  25. divi/qprog/algorithms/_vqe.py +262 -0
  26. divi/qprog/batch.py +387 -74
  27. divi/qprog/checkpointing.py +556 -0
  28. divi/qprog/exceptions.py +9 -0
  29. divi/qprog/optimizers.py +1014 -43
  30. divi/qprog/quantum_program.py +243 -412
  31. divi/qprog/typing.py +62 -0
  32. divi/qprog/variational_quantum_algorithm.py +1208 -0
  33. divi/qprog/workflows/__init__.py +10 -0
  34. divi/qprog/{_graph_partitioning.py → workflows/_graph_partitioning.py} +139 -95
  35. divi/qprog/workflows/_qubo_partitioning.py +221 -0
  36. divi/qprog/workflows/_vqe_sweep.py +560 -0
  37. divi/reporting/__init__.py +7 -0
  38. divi/reporting/_pbar.py +127 -0
  39. divi/reporting/_qlogger.py +68 -0
  40. divi/reporting/_reporter.py +155 -0
  41. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/METADATA +43 -15
  42. qoro_divi-0.6.0.dist-info/RECORD +47 -0
  43. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info}/WHEEL +1 -1
  44. qoro_divi-0.6.0.dist-info/licenses/LICENSES/.license-header +3 -0
  45. divi/_pbar.py +0 -73
  46. divi/circuits.py +0 -139
  47. divi/exp/cirq/_lexer.py +0 -126
  48. divi/exp/cirq/_parser.py +0 -889
  49. divi/exp/cirq/_qasm_export.py +0 -37
  50. divi/exp/cirq/_qasm_import.py +0 -35
  51. divi/exp/cirq/exception.py +0 -21
  52. divi/exp/scipy/_cobyla.py +0 -342
  53. divi/exp/scipy/pyprima/LICENCE.txt +0 -28
  54. divi/exp/scipy/pyprima/__init__.py +0 -263
  55. divi/exp/scipy/pyprima/cobyla/__init__.py +0 -0
  56. divi/exp/scipy/pyprima/cobyla/cobyla.py +0 -599
  57. divi/exp/scipy/pyprima/cobyla/cobylb.py +0 -849
  58. divi/exp/scipy/pyprima/cobyla/geometry.py +0 -240
  59. divi/exp/scipy/pyprima/cobyla/initialize.py +0 -269
  60. divi/exp/scipy/pyprima/cobyla/trustregion.py +0 -540
  61. divi/exp/scipy/pyprima/cobyla/update.py +0 -331
  62. divi/exp/scipy/pyprima/common/__init__.py +0 -0
  63. divi/exp/scipy/pyprima/common/_bounds.py +0 -41
  64. divi/exp/scipy/pyprima/common/_linear_constraints.py +0 -46
  65. divi/exp/scipy/pyprima/common/_nonlinear_constraints.py +0 -64
  66. divi/exp/scipy/pyprima/common/_project.py +0 -224
  67. divi/exp/scipy/pyprima/common/checkbreak.py +0 -107
  68. divi/exp/scipy/pyprima/common/consts.py +0 -48
  69. divi/exp/scipy/pyprima/common/evaluate.py +0 -101
  70. divi/exp/scipy/pyprima/common/history.py +0 -39
  71. divi/exp/scipy/pyprima/common/infos.py +0 -30
  72. divi/exp/scipy/pyprima/common/linalg.py +0 -452
  73. divi/exp/scipy/pyprima/common/message.py +0 -336
  74. divi/exp/scipy/pyprima/common/powalg.py +0 -131
  75. divi/exp/scipy/pyprima/common/preproc.py +0 -393
  76. divi/exp/scipy/pyprima/common/present.py +0 -5
  77. divi/exp/scipy/pyprima/common/ratio.py +0 -56
  78. divi/exp/scipy/pyprima/common/redrho.py +0 -49
  79. divi/exp/scipy/pyprima/common/selectx.py +0 -346
  80. divi/interfaces.py +0 -25
  81. divi/parallel_simulator.py +0 -258
  82. divi/qlogger.py +0 -119
  83. divi/qoro_service.py +0 -343
  84. divi/qprog/_mlae.py +0 -182
  85. divi/qprog/_qaoa.py +0 -440
  86. divi/qprog/_vqe.py +0 -275
  87. divi/qprog/_vqe_sweep.py +0 -144
  88. divi/utils.py +0 -116
  89. qoro_divi-0.2.0b1.dist-info/RECORD +0 -58
  90. /divi/{qem.py → circuits/qem.py} +0 -0
  91. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSE +0 -0
  92. {qoro_divi-0.2.0b1.dist-info → qoro_divi-0.6.0.dist-info/licenses}/LICENSES/Apache-2.0.txt +0 -0
divi/qprog/optimizers.py CHANGED
@@ -2,74 +2,1045 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
+ import base64
6
+ import pickle
7
+ import time
8
+ from abc import ABC, abstractmethod
9
+ from collections.abc import Callable
5
10
  from enum import Enum
11
+ from pathlib import Path
12
+ from typing import Any
6
13
 
14
+ import cma
15
+ import dill
7
16
  import numpy as np
17
+ import numpy.typing as npt
18
+ from pydantic import BaseModel
19
+ from pymoo.algorithms.soo.nonconvex.de import DE # type: ignore
20
+ from pymoo.core.evaluator import Evaluator
21
+ from pymoo.core.individual import Individual
22
+ from pymoo.core.population import Population
23
+ from pymoo.core.problem import Problem
24
+ from pymoo.core.termination import NoTermination
25
+ from pymoo.problems.static import StaticProblem
26
+ from scipy.optimize import OptimizeResult, minimize
8
27
 
28
+ from divi.qprog.checkpointing import (
29
+ OPTIMIZER_STATE_FILE,
30
+ _atomic_write,
31
+ _load_and_validate_pydantic_model,
32
+ )
33
+
34
+
35
+ class MonteCarloState(BaseModel):
36
+ """Pydantic model for Monte Carlo optimizer state."""
37
+
38
+ population_size: int
39
+ n_best_sets: int
40
+ keep_best_params: bool
41
+ curr_iteration: int
42
+ # Store arrays as lists for JSON serialization
43
+ # Population arrays are always 2D: (population_size, n_params)
44
+ population: list[list[float]]
45
+ evaluated_population: list[list[float]]
46
+ losses: list[float]
47
+ # The RNG state is a nested dict/tuple structure; it is pickled and
48
+ # stored as a base64-encoded string for JSON compatibility
49
+ rng_state_b64: str
50
+
51
+
52
+ class PymooState(BaseModel):
53
+ """Pydantic model for Pymoo optimizer state."""
54
+
55
+ method_value: str
56
+ population_size: int
57
+ algorithm_kwargs: dict[str, Any]
58
+ # We store the pickled algorithm object as base64 encoded string
59
+ algorithm_obj_b64: str
60
+
61
+
62
+ class Optimizer(ABC):
63
+ """
64
+ Abstract base class for all optimizers.
65
+
66
+ .. warning::
67
+ **Thread Safety**: Optimizer instances are **not thread-safe**. They maintain
68
+ internal state (e.g., current population, iteration count, RNG state) that changes
69
+ during optimization.
70
+
71
+ Do **not** share a single `Optimizer` instance across multiple `QuantumProgram`
72
+ instances or threads running in parallel. Doing so will lead to race conditions,
73
+ corrupted state, and potential crashes.
74
+
75
+ If you need to use the same optimizer configuration for multiple programs,
76
+ create a separate instance for each program. You can use the helper function
77
+ :func:`copy_optimizer` to create a fresh copy with the same configuration.
78
+ """
79
+
80
+ @property
81
+ @abstractmethod
82
+ def n_param_sets(self):
83
+ """
84
+ Returns the number of parameter sets the optimizer can handle per optimization run.
85
+ Returns:
86
+ int: Number of parameter sets.
87
+ """
88
+ raise NotImplementedError("This method should be implemented by subclasses.")
89
+
90
+ @abstractmethod
91
+ def optimize(
92
+ self,
93
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
94
+ initial_params: npt.NDArray[np.float64],
95
+ callback_fn: Callable[[OptimizeResult], Any] | None = None,
96
+ **kwargs,
97
+ ) -> OptimizeResult:
98
+ """Optimize the given cost function starting from initial parameters.
99
+
100
+ Parameters:
101
+ cost_fn: The cost function to minimize.
102
+ initial_params: Initial parameters for the optimization.
103
+ callback_fn: Function called after each iteration with an OptimizeResult object.
104
+ **kwargs: Additional keyword arguments for the optimizer:
105
+
106
+ - max_iterations (int, optional): Total desired number of iterations.
107
+ When resuming from a checkpoint, this represents the total iterations
108
+ desired across all runs. The optimizer will automatically calculate
109
+ and run only the remaining iterations needed.
110
+ Defaults vary by optimizer (e.g., 5 for population-based optimizers,
111
+ None for some scipy methods).
112
+ - rng (np.random.Generator, optional): Random number generator for
113
+ stochastic optimizers (PymooOptimizer, MonteCarloOptimizer).
114
+ Defaults to a new generator if not provided.
115
+ - jac (Callable, optional): Gradient/Jacobian function for
116
+ gradient-based optimizers (only used by ScipyOptimizer with
117
+ L_BFGS_B method). Defaults to None.
118
+
119
+ Returns:
120
+ OptimizeResult: The optimization result with the final parameters and cost value.
121
+ """
122
+ raise NotImplementedError("This method should be implemented by subclasses.")
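For illustration only (not part of the released code), the "total iterations across runs" behaviour described above, sketched with the MonteCarloOptimizer and its checkpointing methods defined later in this module; cost_fn and initial_params stand in for user-supplied values:

opt = MonteCarloOptimizer(population_size=10)
opt.optimize(cost_fn, initial_params, max_iterations=5)   # runs 5 iterations
opt.save_state("checkpoints/run1")                        # persist optimizer state

restored = MonteCarloOptimizer.load_state("checkpoints/run1")
# max_iterations is the total across runs, so only the remaining 3 iterations execute here.
restored.optimize(cost_fn, initial_params=None, max_iterations=8)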
123
+
124
+ @abstractmethod
125
+ def get_config(self) -> dict[str, Any]:
126
+ """Get optimizer configuration for checkpoint reconstruction.
127
+
128
+ Returns:
129
+ dict[str, Any]: Dictionary containing optimizer type and configuration parameters.
130
+
131
+ Raises:
132
+ NotImplementedError: If the optimizer does not support checkpointing.
133
+ """
134
+ raise NotImplementedError("This method should be implemented by subclasses.")
135
+
136
+ @abstractmethod
137
+ def save_state(self, checkpoint_dir: str) -> None:
138
+ """Save the optimizer's internal state to a checkpoint directory.
139
+
140
+ Args:
141
+ checkpoint_dir (str): Directory path where the optimizer state will be saved.
142
+ """
143
+ raise NotImplementedError("This method should be implemented by subclasses.")
144
+
145
+ @classmethod
146
+ @abstractmethod
147
+ def load_state(cls, checkpoint_dir: str) -> "Optimizer":
148
+ """Load the optimizer's internal state from a checkpoint directory.
149
+
150
+ Creates a new optimizer instance with the state restored from the checkpoint.
151
+
152
+ Args:
153
+ checkpoint_dir (str): Directory path where the optimizer state is saved.
154
+
155
+ Returns:
156
+ Optimizer: A new optimizer instance with restored state.
157
+ """
158
+ raise NotImplementedError("This method should be implemented by subclasses.")
159
+
160
+ @abstractmethod
161
+ def reset(self) -> None:
162
+ """Reset the optimizer's internal state to allow fresh optimization runs.
163
+
164
+ Clears any state accumulated during previous optimization runs, allowing
165
+ the optimizer to be reused for new optimization problems without creating
166
+ a new instance.
167
+ """
168
+ raise NotImplementedError("This method should be implemented by subclasses.")
169
+
170
+
171
+ class PymooMethod(Enum):
172
+ """Supported optimization methods from the pymoo library."""
173
+
174
+ CMAES = "CMAES"
175
+ DE = "DE"
176
+
177
+
178
+ class PymooOptimizer(Optimizer):
179
+ """
180
+ Optimizer wrapper for pymoo optimization algorithms and CMA-ES.
181
+
182
+ Supports population-based optimization methods from the pymoo library (DE)
183
+ and the cma library (CMAES).
184
+ """
185
+
186
+ def __init__(self, method: PymooMethod, population_size: int = 50, **kwargs):
187
+ """
188
+ Initialize a pymoo-based optimizer.
189
+
190
+ Args:
191
+ method (PymooMethod): The optimization algorithm to use (CMAES or DE).
192
+ population_size (int, optional): Size of the population for the algorithm.
193
+ Defaults to 50.
194
+ **kwargs: Additional algorithm-specific parameters passed to pymoo/cma.
195
+ """
196
+ super().__init__()
197
+
198
+ self.method = method
199
+ self.population_size = population_size
200
+ self.algorithm_kwargs = kwargs
201
+
202
+ # Optimization state (updated during optimize(), used for checkpointing)
203
+ self._curr_algorithm_obj: Any | None = None
204
+
205
+ @property
206
+ def n_param_sets(self):
207
+ """
208
+ Get the number of parameter sets (population size) used by this optimizer.
209
+
210
+ Returns:
211
+ int: Population size for the optimization algorithm.
212
+ """
213
+ # Determine population size from stored parameters
214
+ if self.method.value == "DE":
215
+ return self.population_size
216
+ elif self.method.value == "CMAES":
217
+ # CMAES uses 'popsize' in options dict
218
+ return self.algorithm_kwargs.get("popsize", self.population_size)
219
+ return self.population_size
220
+
221
+ def get_config(self) -> dict[str, Any]:
222
+ """Get optimizer configuration for checkpoint reconstruction.
223
+
224
+ Returns:
225
+ dict[str, Any]: Dictionary containing optimizer type and configuration parameters.
226
+ """
227
+ return {
228
+ "type": "PymooOptimizer",
229
+ "method": self.method.value,
230
+ "population_size": self.population_size,
231
+ **self.algorithm_kwargs,
232
+ }
233
+
234
+ def _initialize_cmaes(
235
+ self,
236
+ initial_params: npt.NDArray[np.float64],
237
+ rng: np.random.Generator,
238
+ ) -> Any:
239
+ """Initialize CMA-ES strategy."""
240
+ # Initialize CMA-ES using cma library
241
+ # cma expects a single initial solution (mean) and initial sigma
242
+ x0 = initial_params[0] # Use first parameter set as mean
243
+
244
+ # Handle sigma/sigma0
245
+ sigma0 = self.algorithm_kwargs.get(
246
+ "sigma0", self.algorithm_kwargs.get("sigma", 0.1)
247
+ )
248
+
249
+ # Filter kwargs for CMAEvolutionStrategy
250
+ cma_kwargs = {
251
+ k: v
252
+ for k, v in self.algorithm_kwargs.items()
253
+ if k not in ["sigma0", "sigma", "popsize"]
254
+ }
255
+ cma_kwargs["popsize"] = self.population_size
256
+ cma_kwargs["seed"] = rng.integers(0, 2**32)
257
+
258
+ es = cma.CMAEvolutionStrategy(x0, sigma0, cma_kwargs)
259
+ return es
260
+
261
+ def _initialize_pymoo(
262
+ self,
263
+ initial_params: npt.NDArray[np.float64],
264
+ rng: np.random.Generator,
265
+ ) -> Any:
266
+ """Initialize Pymoo strategy (DE)."""
267
+ # Initialize DE using pymoo
268
+ optimizer_obj = globals()[self.method.value](
269
+ pop_size=self.population_size,
270
+ parallelize=False,
271
+ **self.algorithm_kwargs,
272
+ )
273
+
274
+ seed = rng.bit_generator.seed_seq.spawn(1)[0].generate_state(1)[0]
275
+ n_var = initial_params.shape[-1]
276
+
277
+ xl = np.zeros(n_var)
278
+ xu = np.ones(n_var) * 2 * np.pi
279
+ problem = Problem(n_var=n_var, n_obj=1, xl=xl, xu=xu)
280
+
281
+ optimizer_obj.setup(
282
+ problem,
283
+ termination=NoTermination(),
284
+ seed=int(seed),
285
+ verbose=False,
286
+ )
287
+ optimizer_obj.start_time = time.time()
288
+
289
+ init_pop = Population.create(
290
+ *[Individual(X=initial_params[i]) for i in range(self.n_param_sets)]
291
+ )
292
+ optimizer_obj.pop = init_pop
293
+
294
+ return optimizer_obj
295
+
296
+ def _initialize_optimizer(
297
+ self,
298
+ initial_params: npt.NDArray[np.float64],
299
+ rng: np.random.Generator,
300
+ ) -> Any:
301
+ """Initialize a fresh optimizer instance.
302
+
303
+ Args:
304
+ initial_params: Initial parameter values.
305
+ rng: Random number generator.
306
+
307
+ Returns:
308
+ Optimizer object (cma.CMAEvolutionStrategy or pymoo.DE).
309
+ """
310
+ if self.method == PymooMethod.CMAES:
311
+ return self._initialize_cmaes(initial_params, rng)
312
+ else:
313
+ return self._initialize_pymoo(initial_params, rng)
314
+
315
+ def _optimize_cmaes(
316
+ self,
317
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
318
+ iterations_to_run: int,
319
+ callback_fn: Callable | None,
320
+ ) -> OptimizeResult:
321
+ """Run CMA-ES optimization loop."""
322
+ es = self._curr_algorithm_obj
323
+ for _ in range(iterations_to_run):
324
+ # Ask
325
+ X = es.ask()
326
+ evaluated_X = np.array(X)
327
+
328
+ # Evaluate
329
+ curr_losses = cost_fn(evaluated_X)
330
+
331
+ # Tell
332
+ es.tell(X, curr_losses)
333
+
334
+ if callback_fn:
335
+ callback_fn(OptimizeResult(x=evaluated_X, fun=curr_losses))
336
+
337
+ # Return result
338
+ return OptimizeResult(
339
+ x=es.result.xbest,
340
+ fun=es.result.fbest,
341
+ nit=es.countiter,
342
+ )
343
+
344
+ def _optimize_pymoo(
345
+ self,
346
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
347
+ iterations_to_run: int,
348
+ callback_fn: Callable | None,
349
+ ) -> OptimizeResult:
350
+ """Run Pymoo (DE) optimization loop."""
351
+ problem = self._curr_algorithm_obj.problem
352
+
353
+ for _ in range(iterations_to_run):
354
+ pop = self._curr_algorithm_obj.pop
355
+ evaluated_X = pop.get("X")
356
+
357
+ curr_losses = cost_fn(evaluated_X)
358
+ Evaluator().eval(StaticProblem(problem, F=curr_losses), pop)
359
+
360
+ self._curr_algorithm_obj.tell(infills=pop)
361
+
362
+ # Ask for next population to evaluate
363
+ self._curr_algorithm_obj.pop = self._curr_algorithm_obj.ask()
364
+
365
+ if callback_fn:
366
+ callback_fn(OptimizeResult(x=evaluated_X, fun=curr_losses))
367
+
368
+ result = self._curr_algorithm_obj.result()
369
+
370
+ # nit should represent total iterations completed (n_gen is 1-indexed)
371
+ return OptimizeResult(
372
+ x=result.X,
373
+ fun=result.F,
374
+ nit=self._curr_algorithm_obj.n_gen - 1,
375
+ )
376
+
377
+ def optimize(
378
+ self,
379
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
380
+ initial_params: npt.NDArray[np.float64] | None = None,
381
+ callback_fn: Callable | None = None,
382
+ **kwargs,
383
+ ):
384
+ """
385
+ Run the optimization algorithm.
386
+
387
+ Args:
388
+ cost_fn (Callable): Function to minimize. Should accept a 2D array of
389
+ parameter sets and return an array of cost values.
390
+ initial_params (npt.NDArray[np.float64], optional): Initial parameter values as a 2D array
391
+ of shape (n_param_sets, n_params). Should be None when resuming from a checkpoint.
392
+ callback_fn (Callable, optional): Function called after each iteration
393
+ with an OptimizeResult object. Defaults to None.
394
+ **kwargs: Additional keyword arguments:
395
+
396
+ - max_iterations (int): Total desired number of iterations.
397
+ When resuming from a checkpoint, this represents the total iterations
398
+ desired across all runs. The optimizer will automatically calculate
399
+ and run only the remaining iterations needed. Defaults to 5.
400
+ - rng (np.random.Generator): Random number generator.
401
+
402
+ Returns:
403
+ OptimizeResult: Optimization result with final parameters and cost value.
404
+ """
405
+ max_iterations = kwargs.pop("max_iterations", 5)
406
+
407
+ # Resume from checkpoint or initialize fresh
408
+ if self._curr_algorithm_obj is not None:
409
+ if self.method == PymooMethod.CMAES:
410
+ es = self._curr_algorithm_obj
411
+ # es.countiter is the iteration (generation) counter maintained by cma,
412
+ # i.e. the number of completed ask/tell iterations
413
+ iterations_completed = es.countiter
414
+ else:
415
+ # Pymoo DE
416
+ # n_gen is 1-indexed (includes initialization), so actual iterations = n_gen - 1
417
+ iterations_completed = self._curr_algorithm_obj.n_gen - 1
418
+
419
+ iterations_remaining = max_iterations - iterations_completed
420
+ iterations_to_run = max(0, iterations_remaining)
421
+ else:
422
+ rng = kwargs.pop("rng", np.random.default_rng())
423
+ self._curr_algorithm_obj = self._initialize_optimizer(initial_params, rng)
424
+ iterations_to_run = max_iterations
425
+
426
+ if self.method == PymooMethod.CMAES:
427
+ return self._optimize_cmaes(cost_fn, iterations_to_run, callback_fn)
428
+ else:
429
+ return self._optimize_pymoo(cost_fn, iterations_to_run, callback_fn)
430
+
431
+ def save_state(self, checkpoint_dir: Path | str) -> None:
432
+ """Save the optimizer's internal state to a checkpoint directory.
433
+
434
+ Args:
435
+ checkpoint_dir (Path | str): Directory path where the optimizer state will be saved.
436
+
437
+ Raises:
438
+ RuntimeError: If optimization has not been run (no state to save).
439
+ """
440
+ if self._curr_algorithm_obj is None:
441
+ raise RuntimeError(
442
+ "Cannot save checkpoint: optimization has not been run. "
443
+ "At least one iteration must complete before saving optimizer state."
444
+ )
445
+
446
+ checkpoint_path = Path(checkpoint_dir)
447
+ checkpoint_path.mkdir(parents=True, exist_ok=True)
448
+
449
+ state_file = checkpoint_path / OPTIMIZER_STATE_FILE
450
+
451
+ # Serialize algorithm object using dill, then base64 encode
452
+ # For CMAES (cma lib), algorithm object is picklable.
453
+ # For DE (pymoo), algorithm object is picklable and includes pop and problem.
454
+
455
+ algorithm_obj_bytes = dill.dumps(self._curr_algorithm_obj)
456
+ algorithm_obj_b64 = base64.b64encode(algorithm_obj_bytes).decode("ascii")
457
+
458
+ state = PymooState(
459
+ method_value=self.method.value,
460
+ population_size=self.population_size,
461
+ algorithm_kwargs=self.algorithm_kwargs,
462
+ algorithm_obj_b64=algorithm_obj_b64,
463
+ )
464
+
465
+ _atomic_write(state_file, state.model_dump_json(indent=2))
466
+
467
+ @classmethod
468
+ def load_state(cls, checkpoint_dir: Path | str) -> "PymooOptimizer":
469
+ """Load the optimizer's internal state from a checkpoint directory.
470
+
471
+ Creates a new PymooOptimizer instance with the state restored from the checkpoint.
472
+
473
+ Args:
474
+ checkpoint_dir (Path | str): Directory path where the optimizer state is saved.
475
+
476
+ Returns:
477
+ PymooOptimizer: A new optimizer instance with restored state.
478
+
479
+ Raises:
480
+ FileNotFoundError: If the checkpoint file does not exist.
481
+ """
482
+ checkpoint_path = Path(checkpoint_dir)
483
+ state_file = checkpoint_path / OPTIMIZER_STATE_FILE
484
+
485
+ state = _load_and_validate_pydantic_model(
486
+ state_file,
487
+ PymooState,
488
+ required_fields=["method_value", "algorithm_obj_b64"],
489
+ error_context="Pymoo optimizer",
490
+ )
491
+
492
+ # Create new instance with saved configuration
493
+ optimizer = cls(
494
+ method=PymooMethod(state.method_value),
495
+ population_size=state.population_size,
496
+ **state.algorithm_kwargs,
497
+ )
498
+
499
+ # Restore algorithm object from base64 string
500
+ # For DE, this includes the population and problem
501
+ optimizer._curr_algorithm_obj = dill.loads(
502
+ base64.b64decode(state.algorithm_obj_b64)
503
+ )
504
+
505
+ return optimizer
506
+
507
+ def reset(self) -> None:
508
+ """Reset the optimizer's internal state.
509
+
510
+ Clears the current algorithm object, allowing the optimizer
511
+ to be reused for fresh optimization runs.
512
+ """
513
+ self._curr_algorithm_obj = None
514
+
515
+
516
+ class ScipyMethod(Enum):
517
+ """Supported optimization methods from scipy.optimize."""
9
518
 
10
- class Optimizer(Enum):
11
519
  NELDER_MEAD = "Nelder-Mead"
12
520
  COBYLA = "COBYLA"
13
- MONTE_CARLO = "Monte Carlo"
14
521
  L_BFGS_B = "L-BFGS-B"
15
522
 
16
- def describe(self):
17
- return self.name, self.value
18
523
 
19
- @property
20
- def n_param_sets(self):
21
- if self in (Optimizer.NELDER_MEAD, Optimizer.L_BFGS_B, Optimizer.COBYLA):
22
- return 1
23
- elif self == Optimizer.MONTE_CARLO:
24
- return 10
524
+ class ScipyOptimizer(Optimizer):
525
+ """
526
+ Optimizer wrapper for scipy.optimize methods.
527
+
528
+ Supports gradient-free and gradient-based optimization algorithms from scipy,
529
+ including Nelder-Mead simplex, COBYLA, and L-BFGS-B.
530
+ """
531
+
532
+ def __init__(self, method: ScipyMethod):
533
+ """
534
+ Initialize a scipy-based optimizer.
535
+
536
+ Args:
537
+ method (ScipyMethod): The optimization algorithm to use.
538
+ """
539
+ super().__init__()
540
+
541
+ self.method = method
25
542
 
26
543
  @property
27
- def n_samples(self):
28
- if self == Optimizer.MONTE_CARLO:
29
- return 10
544
+ def n_param_sets(self) -> int:
545
+ """
546
+ Get the number of parameter sets used by this optimizer.
547
+
548
+ Returns:
549
+ int: Always returns 1, as scipy optimizers use single-point optimization.
550
+ """
30
551
  return 1
31
552
 
32
- def compute_new_parameters(self, params, iteration, **kwargs):
33
- if self != Optimizer.MONTE_CARLO:
34
- raise NotImplementedError
553
+ def optimize(
554
+ self,
555
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
556
+ initial_params: npt.NDArray[np.float64],
557
+ callback_fn: Callable[[OptimizeResult], Any] | None = None,
558
+ **kwargs,
559
+ ) -> OptimizeResult:
560
+ """
561
+ Run the scipy optimization algorithm.
562
+
563
+ Args:
564
+ cost_fn (Callable): Function to minimize. Should accept a 1D array of
565
+ parameters and return a scalar cost value.
566
+ initial_params (npt.NDArray[np.float64]): Initial parameter values as a 1D or 2D array.
567
+ If 2D with shape (1, n_params), it will be squeezed to 1D.
568
+ callback_fn (Callable, optional): Function called after each iteration
569
+ with an `OptimizeResult` object. Defaults to None.
570
+ **kwargs: Additional keyword arguments:
571
+
572
+ - max_iterations (int, optional): Total desired number of iterations.
573
+ Defaults to None (no limit for some methods).
574
+ - jac (Callable): Gradient function (only used for L-BFGS-B).
575
+
576
+ Returns:
577
+ OptimizeResult: Optimization result with final parameters and cost value.
578
+ """
579
+ max_iterations = kwargs.pop("max_iterations", None)
580
+
581
+ # If a callback is provided, we wrap it so that the data passed
582
+ # to the user's callback has a consistent shape.
583
+ if callback_fn:
584
+
585
+ def callback_wrapper(intermediate_result: OptimizeResult):
586
+ # Create a dictionary from the intermediate result to preserve all of its keys.
587
+ result_dict = dict(intermediate_result)
588
+
589
+ # Overwrite 'x' and 'fun' to ensure they have consistent dimensions.
590
+ result_dict["x"] = np.atleast_2d(intermediate_result.x)
591
+ result_dict["fun"] = np.atleast_1d(intermediate_result.fun)
592
+
593
+ # Create a new OptimizeResult and pass it to the user's callback.
594
+ return callback_fn(OptimizeResult(**result_dict))
595
+
596
+ else:
597
+ callback_wrapper = None
598
+
599
+ if max_iterations is None or self.method == ScipyMethod.COBYLA:
600
+ # COBYLA treats maxiter as maxfev, so we need
601
+ # to use the callback fn for counting instead.
602
+ maxiter = None
603
+ else:
604
+ # Need to add one more iteration for Nelder-Mead's simplex initialization step
605
+ maxiter = (
606
+ max_iterations + 1
607
+ if self.method == ScipyMethod.NELDER_MEAD
608
+ else max_iterations
609
+ )
610
+
611
+ return minimize(
612
+ cost_fn,
613
+ initial_params.squeeze(),
614
+ method=self.method.value,
615
+ jac=(
616
+ kwargs.pop("jac", None) if self.method == ScipyMethod.L_BFGS_B else None
617
+ ),
618
+ callback=callback_wrapper,
619
+ options={"maxiter": maxiter},
620
+ )
621
+
622
+ def save_state(self, checkpoint_dir: str) -> None:
623
+ """Save the optimizer's internal state to a checkpoint directory.
624
+
625
+ Scipy optimizers do not support saving state mid-minimization as scipy.optimize
626
+ does not provide access to the internal optimizer state.
35
627
 
628
+ Args:
629
+ checkpoint_dir (str): Directory path where the optimizer state would be saved.
630
+
631
+ Raises:
632
+ NotImplementedError: Always raised, as scipy optimizers cannot save state.
633
+ """
634
+ raise NotImplementedError(
635
+ "ScipyOptimizer does not support state saving. Scipy's optimization methods "
636
+ "do not provide access to internal optimizer state during minimization. "
637
+ "Please use MonteCarloOptimizer or PymooOptimizer for checkpointing support."
638
+ )
639
+
640
+ @classmethod
641
+ def load_state(cls, checkpoint_dir: str) -> "ScipyOptimizer":
642
+ """Load the optimizer's internal state from a checkpoint directory.
643
+
644
+ Scipy optimizers do not support loading state as they cannot save state.
645
+
646
+ Args:
647
+ checkpoint_dir (str): Directory path where the optimizer state would be loaded from.
648
+
649
+ Raises:
650
+ NotImplementedError: Always raised, as scipy optimizers cannot load state.
651
+ """
652
+ raise NotImplementedError(
653
+ "ScipyOptimizer does not support state loading. Scipy's optimization methods "
654
+ "do not provide access to internal optimizer state during minimization. "
655
+ "Please use MonteCarloOptimizer or PymooOptimizer for checkpointing support."
656
+ )
657
+
658
+ def reset(self) -> None:
659
+ """Reset the optimizer's internal state.
660
+
661
+ ScipyOptimizer does not maintain internal state between optimization runs,
662
+ so this method is a no-op.
663
+ """
664
+ pass
665
+
666
+ def get_config(self) -> dict[str, Any]:
667
+ """Get optimizer configuration for checkpoint reconstruction.
668
+
669
+ Raises:
670
+ NotImplementedError: ScipyOptimizer does not support checkpointing.
671
+ """
672
+ raise NotImplementedError(
673
+ "ScipyOptimizer does not support checkpointing. Please use "
674
+ "MonteCarloOptimizer or PymooOptimizer for checkpointing support."
675
+ )
676
+
677
+
678
+ class MonteCarloOptimizer(Optimizer):
679
+ """
680
+ Monte Carlo-based parameter search optimizer.
681
+
682
+ This optimizer samples parameter space randomly, selects the best-performing
683
+ samples, and uses them as centers for the next generation of samples with
684
+ decreasing variance. This implements a simple but effective evolutionary strategy.
685
+ """
686
+
687
+ def __init__(
688
+ self,
689
+ population_size: int = 10,
690
+ n_best_sets: int = 3,
691
+ keep_best_params: bool = False,
692
+ ):
693
+ """
694
+ Initialize a Monte Carlo optimizer.
695
+
696
+ Args:
697
+ population_size (int, optional): Size of the population for the algorithm.
698
+ Defaults to 10.
699
+ n_best_sets (int, optional): Number of top-performing parameter sets to
700
+ use as seeds for the next generation. Defaults to 3.
701
+ keep_best_params (bool, optional): If True, includes the best parameter sets
702
+ directly in the new population. If False, generates all new parameters
703
+ by sampling around the best ones. Defaults to False.
704
+
705
+ Raises:
706
+ ValueError: If n_best_sets is greater than population_size.
707
+ ValueError: If keep_best_params is True and n_best_sets equals population_size.
708
+ """
709
+ super().__init__()
710
+
711
+ if n_best_sets > population_size:
712
+ raise ValueError(
713
+ "n_best_sets must be less than or equal to population_size."
714
+ )
715
+
716
+ if keep_best_params and n_best_sets == population_size:
717
+ raise ValueError(
718
+ "If keep_best_params is True, n_best_sets must be less than population_size."
719
+ )
720
+
721
+ self._population_size = population_size
722
+ self._n_best_sets = n_best_sets
723
+ self._keep_best_params = keep_best_params
724
+
725
+ # Optimization state (updated during optimize(), used for checkpointing)
726
+ self._curr_population: npt.NDArray[np.float64] | None = None
727
+ self._curr_evaluated_population: npt.NDArray[np.float64] | None = None
728
+ self._curr_losses: npt.NDArray[np.float64] | None = None
729
+ self._curr_iteration: int | None = None
730
+ self._curr_rng_state: dict | None = None
731
+
732
+ @property
733
+ def population_size(self) -> int:
734
+ """
735
+ Get the size of the population.
736
+
737
+ Returns:
738
+ int: Size of the population.
739
+ """
740
+ return self._population_size
741
+
742
+ @property
743
+ def n_param_sets(self) -> int:
744
+ """Number of parameter sets (population size), per the Optimizer interface.
745
+
746
+ Returns:
747
+ int: The population size.
748
+ """
749
+ return self._population_size
750
+
751
+ @property
752
+ def n_best_sets(self) -> int:
753
+ """
754
+ Get the number of best parameter sets used for seeding the next generation.
755
+
756
+ Returns:
757
+ int: Number of best-performing sets kept.
758
+ """
759
+ return self._n_best_sets
760
+
761
+ @property
762
+ def keep_best_params(self) -> bool:
763
+ """
764
+ Get whether the best parameters are kept in the new population.
765
+
766
+ Returns:
767
+ bool: True if best parameters are included in new population, False otherwise.
768
+ """
769
+ return self._keep_best_params
770
+
771
+ def get_config(self) -> dict[str, Any]:
772
+ """Get optimizer configuration for checkpoint reconstruction.
773
+
774
+ Returns:
775
+ dict[str, Any]: Dictionary containing optimizer type and configuration parameters.
776
+ """
777
+ return {
778
+ "type": "MonteCarloOptimizer",
779
+ "population_size": self._population_size,
780
+ "n_best_sets": self._n_best_sets,
781
+ "keep_best_params": self._keep_best_params,
782
+ }
783
+
784
+ def _compute_new_parameters(
785
+ self,
786
+ params: npt.NDArray[np.float64],
787
+ curr_iteration: int,
788
+ best_indices: npt.NDArray[np.intp],
789
+ rng: np.random.Generator,
790
+ ) -> npt.NDArray[np.float64]:
791
+ """
792
+ Generates a new population of parameters based on the best-performing ones.
793
+ """
794
+
795
+ # 1. Select the best parameter sets from the current population
796
+ best_params = params[best_indices]
797
+
798
+ # 2. Determine how many new samples to generate and calculate repeat counts
799
+ if self._keep_best_params:
800
+ n_new_samples = self._population_size - self._n_best_sets
801
+ # Calculate repeat counts for new samples only
802
+ samples_per_best = n_new_samples // self._n_best_sets
803
+ remainder = n_new_samples % self._n_best_sets
804
+ else:
805
+ # Calculate repeat counts for the entire population
806
+ samples_per_best = self._population_size // self._n_best_sets
807
+ remainder = self._population_size % self._n_best_sets
808
+
809
+ repeat_counts = np.full(self._n_best_sets, samples_per_best)
810
+ repeat_counts[:remainder] += 1
811
+
812
+ # 3. Prepare the means for sampling by repeating each best parameter set
813
+ new_means = np.repeat(best_params, repeat_counts, axis=0)
814
+
815
+ # 4. Define the standard deviation (scale), which shrinks over iterations
816
+ scale = 1.0 / (2.0 * (curr_iteration + 1.0))
817
+
818
+ # 5. Generate new parameters by sampling around the best ones
819
+ new_params = rng.normal(loc=new_means, scale=scale)
820
+
821
+ # 6. Apply periodic boundary conditions
822
+ new_params = new_params % (2 * np.pi)
823
+
824
+ # 7. Conditionally combine with best params if keeping them
825
+ if self._keep_best_params:
826
+ return np.vstack([best_params, new_params])
827
+ else:
828
+ return new_params
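A worked example of the repeat-count arithmetic in step 2 above (not part of the released code): with population_size=10, n_best_sets=3 and keep_best_params=False, samples_per_best = 10 // 3 = 3 and remainder = 10 % 3 = 1, so repeat_counts starts as [3, 3, 3] and becomes [4, 3, 3]; one of the three selected sets seeds four new samples and the other two seed three each.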
829
+
830
+ def optimize(
831
+ self,
832
+ cost_fn: Callable[[npt.NDArray[np.float64]], float],
833
+ initial_params: npt.NDArray[np.float64],
834
+ callback_fn: Callable[[OptimizeResult], Any] | None = None,
835
+ **kwargs,
836
+ ) -> OptimizeResult:
837
+ """Perform Monte Carlo optimization on the cost function.
838
+
839
+ Parameters:
840
+ cost_fn: The cost function to minimize.
841
+ initial_params: Initial parameters for the optimization.
842
+ callback_fn: Optional callback function to monitor progress.
843
+ **kwargs: Additional keyword arguments:
844
+
845
+ - max_iterations (int, optional): Total desired number of iterations.
846
+ When resuming from a checkpoint, this represents the total iterations
847
+ desired across all runs. The optimizer will automatically calculate
848
+ and run only the remaining iterations needed. Defaults to 5.
849
+ - rng (np.random.Generator, optional): Random number generator for
850
+ parameter sampling. Defaults to a new generator if not provided.
851
+
852
+ Returns:
853
+ OptimizeResult: The optimization result with the best parameters and cost value found.
854
+ """
36
855
  rng = kwargs.pop("rng", np.random.default_rng())
856
+ max_iterations = kwargs.pop("max_iterations", 5)
37
857
 
38
- losses = kwargs.pop("losses")
39
- smallest_energy_keys = sorted(losses, key=lambda k: losses[k])[: self.n_samples]
858
+ # Resume from checkpoint or initialize fresh
859
+ if self._curr_population is not None:
860
+ start_iter = self._curr_iteration + 1
861
+ rng.bit_generator.state = self._curr_rng_state
862
+ # Calculate remaining iterations to reach total desired
863
+ iterations_completed = self._curr_iteration + 1
864
+ iterations_remaining = max_iterations - iterations_completed
865
+ end_iter = start_iter + max(0, iterations_remaining)
866
+ else:
867
+ self._curr_population = np.copy(initial_params)
868
+ start_iter = 0
869
+ end_iter = max_iterations
40
870
 
41
- new_params = []
871
+ for curr_iter in range(start_iter, end_iter):
872
+ # Evaluate the entire population once
873
+ self._curr_losses = cost_fn(self._curr_population)
874
+ self._curr_evaluated_population = np.copy(self._curr_population)
42
875
 
43
- for key in smallest_energy_keys:
44
- new_param_set = [
45
- rng.normal(
46
- params[int(key)],
47
- 1 / (2 * iteration),
48
- size=params[int(key)].shape,
49
- )
50
- for _ in range(self.n_param_sets)
876
+ # Find the indices of the best-performing parameter sets
877
+ best_indices = np.argpartition(self._curr_losses, self.n_best_sets - 1)[
878
+ : self.n_best_sets
51
879
  ]
52
880
 
53
- for new_param in new_param_set:
54
- new_param = np.clip(new_param, 0, 2 * np.pi)
881
+ # Generate the next generation of parameters (uses RNG, so capture state after)
882
+ self._curr_population = self._compute_new_parameters(
883
+ self._curr_evaluated_population, curr_iter, best_indices, rng
884
+ )
885
+ self._curr_iteration = curr_iter
886
+ self._curr_rng_state = rng.bit_generator.state
887
+
888
+ if callback_fn:
889
+ callback_fn(
890
+ OptimizeResult(
891
+ x=self._curr_evaluated_population, fun=self._curr_losses
892
+ )
893
+ )
894
+
895
+ # Note: 'losses' here are from the last successfully evaluated population
896
+ # (either from the loop above, or from checkpoint state if loop didn't run)
897
+ best_idx = np.argmin(self._curr_losses)
898
+
899
+ # Return the best results from the LAST EVALUATED population
900
+ # nit should be the total number of iterations completed
901
+ total_iterations_completed = (
902
+ self._curr_iteration + 1 if self._curr_iteration is not None else 0
903
+ )
904
+ return OptimizeResult(
905
+ x=self._curr_evaluated_population[best_idx],
906
+ fun=self._curr_losses[best_idx],
907
+ nit=total_iterations_completed,
908
+ )
909
+
910
+ def save_state(self, checkpoint_dir: Path | str) -> None:
911
+ """Save the optimizer's internal state to a checkpoint directory.
912
+
913
+ Args:
914
+ checkpoint_dir (Path | str): Directory path where the optimizer state will be saved.
915
+
916
+ Raises:
917
+ RuntimeError: If optimization has not been run (no state to save).
918
+ """
919
+ if self._curr_population is None:
920
+ raise RuntimeError(
921
+ "Cannot save checkpoint: optimization has not been run. "
922
+ "At least one iteration must complete before saving optimizer state."
923
+ )
924
+
925
+ checkpoint_path = Path(checkpoint_dir)
926
+ checkpoint_path.mkdir(parents=True, exist_ok=True)
927
+
928
+ state_file = checkpoint_path / OPTIMIZER_STATE_FILE
929
+
930
+ # RNG state is a dict/tuple structure, pickle it for bytes storage
931
+ # Then encode to base64 string for JSON serialization
932
+ rng_state_bytes = pickle.dumps(self._curr_rng_state)
933
+ rng_state_b64 = base64.b64encode(rng_state_bytes).decode("ascii")
934
+
935
+ state = MonteCarloState(
936
+ population_size=self._population_size,
937
+ n_best_sets=self._n_best_sets,
938
+ keep_best_params=self._keep_best_params,
939
+ curr_iteration=self._curr_iteration,
940
+ population=self._curr_population.tolist(),
941
+ evaluated_population=self._curr_evaluated_population.tolist(),
942
+ losses=self._curr_losses.tolist(),
943
+ rng_state_b64=rng_state_b64,
944
+ )
945
+
946
+ _atomic_write(state_file, state.model_dump_json(indent=2))
947
+
948
+ @classmethod
949
+ def load_state(cls, checkpoint_dir: Path | str) -> "MonteCarloOptimizer":
950
+ """Load the optimizer's internal state from a checkpoint directory.
951
+
952
+ Creates a new MonteCarloOptimizer instance with the state restored from the checkpoint.
953
+
954
+ Args:
955
+ checkpoint_dir (Path | str): Directory path where the optimizer state is saved.
956
+
957
+ Returns:
958
+ MonteCarloOptimizer: A new optimizer instance with restored state.
959
+
960
+ Raises:
961
+ FileNotFoundError: If the checkpoint file does not exist.
962
+ """
963
+ checkpoint_path = Path(checkpoint_dir)
964
+ state_file = checkpoint_path / OPTIMIZER_STATE_FILE
965
+
966
+ state = _load_and_validate_pydantic_model(
967
+ state_file,
968
+ MonteCarloState,
969
+ required_fields=["population_size", "curr_iteration", "rng_state_b64"],
970
+ error_context="Monte Carlo optimizer",
971
+ )
972
+
973
+ # Create new instance with saved configuration
974
+ optimizer = cls(
975
+ population_size=state.population_size,
976
+ n_best_sets=state.n_best_sets,
977
+ keep_best_params=state.keep_best_params,
978
+ )
979
+
980
+ # Restore state
981
+ optimizer._curr_population = (
982
+ np.array(state.population) if state.population else None
983
+ )
984
+ optimizer._curr_evaluated_population = (
985
+ np.array(state.evaluated_population) if state.evaluated_population else None
986
+ )
987
+ optimizer._curr_losses = np.array(state.losses) if state.losses else None
988
+ optimizer._curr_iteration = (
989
+ state.curr_iteration if state.curr_iteration != -1 else None
990
+ )
991
+
992
+ # Restore RNG state from base64 string -> bytes -> pickle
993
+ rng_state_bytes = base64.b64decode(state.rng_state_b64)
994
+ optimizer._curr_rng_state = pickle.loads(rng_state_bytes)
995
+
996
+ return optimizer
997
+
998
+ def reset(self) -> None:
999
+ """Reset the optimizer's internal state.
1000
+
1001
+ Clears all current optimization state (population, losses, iteration, RNG state),
1002
+ allowing the optimizer to be reused for fresh optimization runs.
1003
+ """
1004
+ self._curr_population = None
1005
+ self._curr_evaluated_population = None
1006
+ self._curr_losses = None
1007
+ self._curr_iteration = None
1008
+ self._curr_rng_state = None
55
1009
 
56
- new_params.extend(new_param_set)
57
1010
 
58
- return np.array(new_params)
1011
+ def copy_optimizer(optimizer: Optimizer) -> Optimizer:
1012
+ """Create a new optimizer instance with the same configuration as the given optimizer.
59
1013
 
60
- def compute_parameter_shift_mask(self, n_params):
61
- if self != Optimizer.L_BFGS_B:
62
- raise NotImplementedError
1014
+ This function creates a fresh copy of an optimizer with identical configuration
1015
+ parameters but with reset internal state. This is useful when multiple programs
1016
+ need their own optimizer instances to avoid state contamination.
63
1017
 
64
- mask_arr = np.arange(0, 2 * n_params, 2)
65
- mask_arr[0] = 1
1018
+ .. tip::
1019
+ Use this function when preparing a batch of programs that will run in parallel.
1020
+ Pass a fresh copy of the optimizer to each program instance to ensure thread safety.
66
1021
 
67
- binary_matrix = (
68
- (mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0
69
- ).astype(np.float64)
1022
+ Args:
1023
+ optimizer: The optimizer to copy.
70
1024
 
71
- binary_matrix = binary_matrix.repeat(2, axis=0)
72
- binary_matrix[1::2] *= -1
73
- binary_matrix *= 0.5 * np.pi
1025
+ Returns:
1026
+ A new optimizer instance with the same configuration but fresh state.
74
1027
 
75
- return binary_matrix
1028
+ Raises:
1029
+ ValueError: If the optimizer type is not recognized.
1030
+ """
1031
+ if isinstance(optimizer, MonteCarloOptimizer):
1032
+ return MonteCarloOptimizer(
1033
+ population_size=optimizer.population_size,
1034
+ n_best_sets=optimizer.n_best_sets,
1035
+ keep_best_params=optimizer.keep_best_params,
1036
+ )
1037
+ elif isinstance(optimizer, PymooOptimizer):
1038
+ return PymooOptimizer(
1039
+ method=optimizer.method,
1040
+ population_size=optimizer.population_size,
1041
+ **optimizer.algorithm_kwargs,
1042
+ )
1043
+ elif isinstance(optimizer, ScipyOptimizer):
1044
+ return ScipyOptimizer(method=optimizer.method)
1045
+ else:
1046
+ raise ValueError(f"Unknown optimizer type: {type(optimizer)}")