fcmaes 1.1.3__py3-none-any.whl → 1.6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fcmaes/optimizer.py CHANGED
@@ -4,68 +4,88 @@
4
4
  # LICENSE file in the root directory.
5
5
 
6
6
  import numpy as np
7
- from numpy.random import MT19937, Generator
7
+ from numpy.random import PCG64DXSM, Generator
8
8
  from scipy.optimize import Bounds, minimize, shgo, differential_evolution, dual_annealing, basinhopping
9
9
  import sys
10
10
  import time
11
- import math
12
- import logging
13
-
14
- from fcmaes import cmaes, cmaescpp, decpp, dacpp, hhcpp
15
-
16
- _logger = None
17
-
18
- def logger(logfile = 'optimizer.log'):
19
- '''default logger used by the parallel retry. Logs both to stdout and into a file.'''
20
- global _logger
21
- if _logger is None:
22
- formatter = logging.Formatter('%(message)s')
23
- file_handler = logging.FileHandler(filename=logfile)
24
- file_handler.setLevel(logging.INFO)
25
- stdout_handler = logging.StreamHandler(sys.stdout)
26
- stdout_handler.setLevel(logging.INFO)
27
- file_handler.setFormatter(formatter)
28
- stdout_handler.setFormatter(formatter)
29
- _logger = logging.getLogger('optimizer')
30
- _logger.addHandler(file_handler)
31
- _logger.addHandler(stdout_handler)
32
- _logger.setLevel(logging.INFO)
33
- return _logger
11
+ from loguru import logger
12
+ import ctypes as ct
13
+ import multiprocessing as mp
14
+ from fcmaes.evaluator import serial, parallel
15
+ from fcmaes import crfmnes, crfmnescpp, pgpecpp, cmaes, de, cmaescpp, decpp, dacpp, bitecpp
16
+
17
+ from typing import Optional, Callable, Tuple, Union
18
+ from numpy.typing import ArrayLike
34
19
 
35
20
  def eprint(*args, **kwargs):
36
21
  """print message to stderr."""
37
22
  print(*args, file=sys.stderr, **kwargs)
38
23
 
39
- def scale(lower, upper):
24
+ def scale(lower: ArrayLike,
25
+ upper: ArrayLike) -> np.ndarray:
40
26
  """scaling = 0.5 * difference of the bounds."""
41
27
  return 0.5 * (np.asarray(upper) - np.asarray(lower))
42
28
 
43
- def typical(lower, upper):
29
+ def typical(lower: ArrayLike,
30
+ upper: ArrayLike) -> np.ndarray:
44
31
  """typical value = mean of the bounds."""
45
32
  return 0.5 * (np.asarray(upper) + np.asarray(lower))
46
33
 
47
- def fitting(guess, lower, upper):
34
+ def fitting(guess: ArrayLike,
35
+ lower: ArrayLike,
36
+ upper: ArrayLike) -> np.ndarray:
48
37
  """fit a guess into the bounds."""
49
- return np.minimum(np.asarray(upper), np.maximum(np.asarray(guess), np.asarray(lower)))
38
+ return np.clip(np.asarray(guess), np.asarray(upper), np.asarray(lower))
50
39
 
51
- def is_terminate(runid, iterations, val):
40
+ def is_terminate(runid: int,
41
+ iterations: int,
42
+ val: float) -> bool:
52
43
  """dummy is_terminate call back."""
53
44
  return False
54
45
 
55
- def random_x(lower, upper):
46
+ def random_x(lower: ArrayLike, upper: ArrayLike) -> np.ndarray:
56
47
  """feasible random value uniformly distributed inside the bounds."""
57
48
  lower = np.asarray(lower)
58
49
  upper = np.asarray(upper)
59
50
  return lower + np.multiply(upper - lower, np.random.rand(lower.size))
60
51
 
61
- def dtime(t0):
52
+ def dtime(t0: float) -> float:
62
53
  """time since t0."""
63
54
  return round(time.perf_counter() - t0, 2)
64
55
 
56
+ class wrapper(object):
57
+ """Fitness function wrapper for use with parallel retry."""
58
+
59
+ def __init__(self,
60
+ fit: Callable[[ArrayLike], float]):
61
+ self.fit = fit
62
+ self.evals = mp.RawValue(ct.c_int, 0)
63
+ self.best_y = mp.RawValue(ct.c_double, np.inf)
64
+ self.t0 = time.perf_counter()
65
+
66
+ def __call__(self, x: ArrayLike) -> float:
67
+ try:
68
+ self.evals.value += 1
69
+ y = self.fit(x)
70
+ y0 = y if np.isscalar(y) else sum(y)
71
+ if y0 < self.best_y.value:
72
+ self.best_y.value = y0
73
+ logger.info(str(dtime(self.t0)) + ' ' +
74
+ str(self.evals.value) + ' ' +
75
+ str(round(self.evals.value/(1E-9 + dtime(self.t0)),0)) + ' ' +
76
+ str(self.best_y.value) + ' ' +
77
+ str(list(x)))
78
+ return y
79
+ except Exception as ex:
80
+ print(str(ex))
81
+ return sys.float_info.max
82
+
65
83
  class Optimizer(object):
66
84
  """Provides different optimization methods for use with parallel retry."""
67
85
 
68
- def __init__(self, max_evaluations=50000, name=''):
86
+ def __init__(self,
87
+ max_evaluations: Optional[int] = 50000,
88
+ name: Optional[str] = ''):
69
89
  self.max_evaluations = max_evaluations
70
90
  self.name = name
71
91
 
@@ -80,136 +100,826 @@ class Optimizer(object):
80
100
  class Sequence(Optimizer):
81
101
  """Sequence of optimizers."""
82
102
 
83
- def __init__(self, optimizers):
103
+ def __init__(self, optimizers: ArrayLike):
84
104
  Optimizer.__init__(self)
85
105
  self.optimizers = optimizers
86
106
  self.max_evaluations = 0
87
107
  for optimizer in self.optimizers:
88
- self.name += optimizer.name + ' '
108
+ self.name += optimizer.name + ' -> '
89
109
  self.max_evaluations += optimizer.max_evaluations
90
-
91
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
110
+ self.name = self.name[:-4]
111
+
112
+ def minimize(self,
113
+ fun: Callable[[ArrayLike], float],
114
+ bounds: Bounds,
115
+ guess: Optional[ArrayLike] = None,
116
+ sdevs: Optional[Union[float, ArrayLike, Callable]] = None,
117
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
118
+ store=None) -> Tuple[np.ndarray, float, int]:
92
119
  evals = 0
120
+ y = np.inf
93
121
  for optimizer in self.optimizers:
94
122
  ret = optimizer.minimize(fun, bounds, guess, sdevs, rg, store)
95
- guess = ret[0]
123
+ if ret[1] < y:
124
+ y = ret[1]
125
+ x = ret[0]
126
+ guess = x
96
127
  evals += ret[2]
97
- return ret[0], ret[1], evals
98
-
128
+ return x, y, evals
129
+
99
130
  class Choice(Optimizer):
100
131
  """Random choice of optimizers."""
101
132
 
102
- def __init__(self, optimizers):
133
+ def __init__(self, optimizers: ArrayLike):
103
134
  Optimizer.__init__(self)
104
135
  self.optimizers = optimizers
105
136
  self.max_evaluations = optimizers[0].max_evaluations
106
137
  for optimizer in self.optimizers:
107
- self.name += optimizer.name + '|'
138
+ self.name += optimizer.name + ' | '
139
+ self.name = self.name[:-3]
108
140
 
109
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
141
+ def minimize(self,
142
+ fun: Callable[[ArrayLike], float],
143
+ bounds: Bounds,
144
+ guess: Optional[ArrayLike] = None,
145
+ sdevs: Optional[Union[float, ArrayLike, Callable]] = None,
146
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
147
+ store=None) -> Tuple[np.ndarray, float, int]:
148
+
110
149
  choice = rg.integers(0, len(self.optimizers))
111
150
  opt = self.optimizers[choice]
112
151
  return opt.minimize(fun, bounds, guess, sdevs, rg, store)
113
152
 
114
- def de_cma(max_evaluations = 50000, popsize=31, stop_fittness = math.inf,
115
- de_max_evals = None, cma_max_evals = None):
153
+ def de_cma(max_evaluations: Optional[int] = 50000,
154
+ popsize: Optional[int] = 31,
155
+ stop_fitness: Optional[float] = -np.inf,
156
+ de_max_evals: Optional[int] = None,
157
+ cma_max_evals: Optional[int] = None,
158
+ ints: Optional[ArrayLike] = None,
159
+ workers: Optional[int] = None) -> Sequence:
116
160
  """Sequence differential evolution -> CMA-ES."""
117
161
 
162
+ de_evals = np.random.uniform(0.1, 0.5)
118
163
  if de_max_evals is None:
119
- de_max_evals = int(0.5*max_evaluations)
164
+ de_max_evals = int(de_evals*max_evaluations)
120
165
  if cma_max_evals is None:
121
- cma_max_evals = int(0.5*max_evaluations)
122
- opt1 = De_cpp(max_evaluations = de_max_evals, stop_fittness = stop_fittness)
166
+ cma_max_evals = int((1.0-de_evals)*max_evaluations)
167
+ opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals,
168
+ stop_fitness = stop_fitness, ints=ints, workers = workers)
123
169
  opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
124
- stop_fittness = stop_fittness)
170
+ stop_fitness = stop_fitness, workers = workers)
125
171
  return Sequence([opt1, opt2])
126
172
 
127
- def da_cma(max_evaluations = 50000, da_max_evals = None, cma_max_evals = None,
128
- popsize=31, stop_fittness = math.inf):
129
- """Sequence differential evolution -> CMA-ES."""
173
+ def de_cma_py(max_evaluations: Optional[int] = 50000,
174
+ popsize: Optional[int] = 31,
175
+ stop_fitness: Optional[float] = -np.inf,
176
+ de_max_evals: Optional[int] = None,
177
+ cma_max_evals: Optional[int] = None,
178
+ ints: Optional[ArrayLike] = None,
179
+ workers: Optional[int] = None) -> Sequence:
180
+ """Sequence differential evolution -> CMA-ES in python."""
181
+
182
+ de_evals = np.random.uniform(0.1, 0.5)
183
+ if de_max_evals is None:
184
+ de_max_evals = int(de_evals*max_evaluations)
185
+ if cma_max_evals is None:
186
+ cma_max_evals = int((1.0-de_evals)*max_evaluations)
187
+ opt1 = De_python(popsize=popsize, max_evaluations = de_max_evals,
188
+ stop_fitness = stop_fitness, ints=ints, workers = workers)
189
+ opt2 = Cma_python(popsize=popsize, max_evaluations = cma_max_evals,
190
+ stop_fitness = stop_fitness, workers = workers)
191
+ return Sequence([opt1, opt2])
130
192
 
193
+ def da_cma(max_evaluations: Optional[int] = 50000,
194
+ popsize: Optional[int] = 31,
195
+ da_max_evals: Optional[int] = None,
196
+ cma_max_evals: Optional[int] = None,
197
+ stop_fitness: Optional[float] = -np.inf) -> Sequence:
198
+ """Sequence dual annealing -> CMA-ES."""
199
+
200
+ da_evals = np.random.uniform(0.1, 0.5)
131
201
  if da_max_evals is None:
132
- da_max_evals = int(0.5*max_evaluations)
202
+ da_max_evals = int(da_evals*max_evaluations)
203
+ if cma_max_evals is None:
204
+ cma_max_evals = int((1.0-da_evals)*max_evaluations)
205
+ opt1 = Da_cpp(max_evaluations = da_max_evals, stop_fitness = stop_fitness)
206
+ opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
207
+ stop_fitness = stop_fitness)
208
+ return Sequence([opt1, opt2])
209
+
210
+ def de_crfmnes(max_evaluations: Optional[int] = 50000,
211
+ popsize: Optional[int] = 32,
212
+ stop_fitness: Optional[float] = -np.inf,
213
+ de_max_evals: Optional[int] = None,
214
+ crfm_max_evals: Optional[int] = None,
215
+ ints: Optional[ArrayLike] = None,
216
+ workers: Optional[int] = None) -> Sequence:
217
+ """Sequence differential evolution -> CRFMNES."""
218
+
219
+ de_evals = np.random.uniform(0.1, 0.5)
220
+ if de_max_evals is None:
221
+ de_max_evals = int(de_evals*max_evaluations)
222
+ if crfm_max_evals is None:
223
+ crfm_max_evals = int((1.0-de_evals)*max_evaluations)
224
+ opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals,
225
+ stop_fitness = stop_fitness, ints=ints, workers = workers)
226
+ opt2 = Crfmnes_cpp(popsize=popsize, max_evaluations = crfm_max_evals,
227
+ stop_fitness = stop_fitness, workers = workers)
228
+ return Sequence([opt1, opt2])
229
+
230
+ def crfmnes_bite(max_evaluations: Optional[int] = 50000,
231
+ popsize: Optional[int] = 31,
232
+ stop_fitness: Optional[float] = -np.inf,
233
+ crfm_max_evals: Optional[int] = None,
234
+ bite_max_evals: Optional[int] = None,
235
+ M: Optional[int] = 1) -> Sequence:
236
+ """Sequence CRFMNES -> Bite."""
237
+
238
+ crfmnes_evals = np.random.uniform(0.1, 0.5)
239
+ if crfm_max_evals is None:
240
+ crfm_max_evals = int(crfmnes_evals*max_evaluations)
241
+ if bite_max_evals is None:
242
+ bite_max_evals = int((1.0-crfmnes_evals)*max_evaluations)
243
+ opt1 = Crfmnes_cpp(popsize=popsize, max_evaluations = crfm_max_evals,
244
+ stop_fitness = stop_fitness)
245
+ opt2 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
246
+ stop_fitness = stop_fitness, M=M)
247
+ return Sequence([opt1, opt2])
248
+
249
+ def bite_cma(max_evaluations: Optional[int] = 50000,
250
+ popsize: Optional[int] = 31,
251
+ stop_fitness: Optional[float] = -np.inf,
252
+ bite_max_evals: Optional[int] = None,
253
+ cma_max_evals: Optional[int] = None,
254
+ M: Optional[int] = 1) -> Sequence:
255
+ """Sequence Bite -> CMA-ES."""
256
+
257
+ bite_evals = np.random.uniform(0.1, 0.5)
258
+ if bite_max_evals is None:
259
+ bite_max_evals = int(bite_evals*max_evaluations)
133
260
  if cma_max_evals is None:
134
- cma_max_evals = int(0.5*max_evaluations)
135
- opt1 = Da_cpp(max_evaluations = da_max_evals, stop_fittness = stop_fittness)
261
+ cma_max_evals = int((1.0-bite_evals)*max_evaluations)
262
+ opt1 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
263
+ stop_fitness = stop_fitness, M=M)
136
264
  opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
137
- stop_fittness = stop_fittness)
265
+ stop_fitness = stop_fitness)
138
266
  return Sequence([opt1, opt2])
139
267
 
268
+ def cma_bite(max_evaluations: Optional[int] = 50000,
269
+ popsize: Optional[int] = 32,
270
+ stop_fitness: Optional[float] = -np.inf,
271
+ cma_max_evals: Optional[int] = None,
272
+ bite_max_evals: Optional[int] = None,
273
+ M: Optional[int] = 1) -> Sequence:
274
+ """Sequence CMA-ES -> Bite."""
275
+
276
+ cma_evals = np.random.uniform(0.1, 0.5)
277
+ if cma_max_evals is None:
278
+ cma_max_evals = int(cma_evals*max_evaluations)
279
+ if bite_max_evals is None:
280
+ bite_max_evals = int((1.0-cma_evals)*max_evaluations)
281
+ opt1 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
282
+ stop_fitness = stop_fitness, stop_hist = 0)
283
+ opt2 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
284
+ stop_fitness = stop_fitness, M=M)
285
+ return Sequence([opt1, opt2])
286
+
287
+ class Crfmnes(Optimizer):
288
+ """CRFMNES Python implementation."""
289
+
290
+ def __init__(self,
291
+ max_evaluations: Optional[int] = 50000,
292
+ popsize: Optional[int] = 32,
293
+ guess: Optional[ArrayLike] = None,
294
+ stop_fitness: Optional[float] = -np.inf,
295
+ sdevs: Optional[float] = None,
296
+ workers: Optional[int] = None):
297
+
298
+ Optimizer.__init__(self, max_evaluations, 'crfmnes')
299
+ self.popsize = popsize
300
+ self.stop_fitness = stop_fitness
301
+ self.guess = guess
302
+ self.sdevs = sdevs
303
+ self.workers = workers
304
+
305
+ def minimize(self,
306
+ fun: Callable[[ArrayLike], float],
307
+ bounds: Optional[Bounds],
308
+ guess: Optional[ArrayLike] = None,
309
+ sdevs: Optional[float] = 0.3,
310
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
311
+ store = None) -> Tuple[np.ndarray, float, int]:
312
+
313
+ ret = crfmnes.minimize(fun, bounds,
314
+ self.guess if not self.guess is None else guess,
315
+ input_sigma = self.sdevs if not self.sdevs is None else sdevs,
316
+ max_evaluations = self.max_eval_num(store),
317
+ popsize=self.popsize,
318
+ stop_fitness = self.stop_fitness,
319
+ rg=rg, runid=self.get_count_runs(store),
320
+ workers = self.workers)
321
+ return ret.x, ret.fun, ret.nfev
322
+
323
+ class Crfmnes_cpp(Optimizer):
324
+ """CRFMNES C++ implementation."""
325
+
326
+ def __init__(self,
327
+ max_evaluations: Optional[int] = 50000,
328
+ popsize: Optional[int] = 32,
329
+ guess: Optional[ArrayLike] = None,
330
+ stop_fitness: Optional[float] = -np.inf,
331
+ sdevs: Optional[float] = None,
332
+ workers: Optional[int] = None):
333
+
334
+ Optimizer.__init__(self, max_evaluations, 'crfmnes cpp')
335
+ self.popsize = popsize
336
+ self.stop_fitness = stop_fitness
337
+ self.guess = guess
338
+ self.sdevs = sdevs
339
+ self.workers = workers
340
+
341
+ def minimize(self,
342
+ fun: Callable[[ArrayLike], float],
343
+ bounds: Optional[Bounds],
344
+ guess: Optional[ArrayLike] = None,
345
+ sdevs: Optional[float] = 0.3,
346
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
347
+ store = None) -> Tuple[np.ndarray, float, int]:
348
+
349
+ ret = crfmnescpp.minimize(fun, bounds,
350
+ self.guess if not self.guess is None else guess,
351
+ input_sigma = self.sdevs if not self.sdevs is None else sdevs,
352
+ max_evaluations = self.max_eval_num(store),
353
+ popsize=self.popsize,
354
+ stop_fitness = self.stop_fitness,
355
+ rg=rg, runid=self.get_count_runs(store),
356
+ workers = self.workers)
357
+ return ret.x, ret.fun, ret.nfev
358
+
359
+ class Pgpe_cpp(Optimizer):
360
+ """PGPE C++ implementation."""
361
+
362
+ def __init__(self,
363
+ max_evaluations: Optional[int] = 500000,
364
+ popsize: Optional[int] = 640,
365
+ guess: Optional[ArrayLike] = None,
366
+ stop_fitness: Optional[float] = -np.inf,
367
+ sdevs: Optional[float] = None,
368
+ workers: Optional[int] = None):
369
+
370
+ Optimizer.__init__(self, max_evaluations, 'pgpe cpp')
371
+ self.popsize = popsize
372
+ self.stop_fitness = stop_fitness
373
+ self.guess = guess
374
+ self.sdevs = sdevs
375
+ self.workers = workers
376
+
377
+ def minimize(self,
378
+ fun: Callable[[ArrayLike], float],
379
+ bounds: Optional[Bounds],
380
+ guess: Optional[ArrayLike] = None,
381
+ sdevs: Optional[float] = 0.1,
382
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
383
+ store = None) -> Tuple[np.ndarray, float, int]:
384
+
385
+ ret = pgpecpp.minimize(fun, bounds,
386
+ self.guess if not self.guess is None else guess,
387
+ input_sigma = self.sdevs if not self.sdevs is None else sdevs,
388
+ max_evaluations = self.max_eval_num(store),
389
+ popsize=self.popsize,
390
+ stop_fitness = self.stop_fitness,
391
+ rg=rg, runid=self.get_count_runs(store),
392
+ workers = self.workers)
393
+ return ret.x, ret.fun, ret.nfev
394
+
140
395
  class Cma_python(Optimizer):
141
396
  """CMA_ES Python implementation."""
142
397
 
143
- def __init__(self, max_evaluations=50000,
144
- popsize = 31, guess=None, stop_fittness = None):
398
+ def __init__(self,
399
+ max_evaluations: Optional[int] = 50000,
400
+ popsize: Optional[int] = 31,
401
+ guess: Optional[ArrayLike] = None,
402
+ stop_fitness: Optional[float] = -np.inf,
403
+ sdevs: Optional[float] = None,
404
+ workers: Optional[int] = None,
405
+ update_gap: Optional[int] = None,
406
+ normalize: Optional[bool] = True):
407
+
145
408
  Optimizer.__init__(self, max_evaluations, 'cma py')
146
409
  self.popsize = popsize
147
- self.stop_fittness = stop_fittness
410
+ self.stop_fitness = stop_fitness
411
+ self.update_gap = update_gap
148
412
  self.guess = guess
413
+ self.sdevs = sdevs
414
+ self.normalize = normalize
415
+ self.workers = workers
416
+
417
+ def minimize(self,
418
+ fun: Callable[[ArrayLike], float],
419
+ bounds: Optional[Bounds],
420
+ guess: Optional[ArrayLike] = None,
421
+ sdevs: Optional[Union[float, ArrayLike, Callable]] = 0.1,
422
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
423
+ store = None) -> Tuple[np.ndarray, float, int]:
149
424
 
150
- def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()), store=None):
151
425
  ret = cmaes.minimize(fun, bounds,
152
- self.guess if guess is None else guess,
153
- input_sigma=sdevs,
426
+ self.guess if not self.guess is None else guess,
427
+ input_sigma= self.sdevs if not self.sdevs is None else sdevs,
154
428
  max_evaluations = self.max_eval_num(store),
155
429
  popsize=self.popsize,
156
- stop_fittness = self.stop_fittness,
157
- rg=rg, runid=self.get_count_runs(store))
430
+ stop_fitness = self.stop_fitness,
431
+ rg=rg, runid=self.get_count_runs(store),
432
+ normalize = self.normalize,
433
+ update_gap = self.update_gap,
434
+ workers = self.workers)
158
435
  return ret.x, ret.fun, ret.nfev
159
436
 
160
437
  class Cma_cpp(Optimizer):
161
438
  """CMA_ES C++ implementation."""
162
439
 
163
- def __init__(self, max_evaluations=50000,
164
- popsize = 31, guess=None, stop_fittness = None):
440
+ def __init__(self,
441
+ max_evaluations: Optional[int] = 50000,
442
+ popsize: Optional[int] = 31,
443
+ guess: Optional[ArrayLike] = None,
444
+ stop_fitness: Optional[float] = -np.inf,
445
+ sdevs: Optional[float] = None,
446
+ workers: Optional[int] = None,
447
+ update_gap: Optional[int] = None,
448
+ normalize: Optional[bool] = True,
449
+ delayed_update: Optional[bool] = True,
450
+ stop_hist: Optional[int] = -1):
451
+
165
452
  Optimizer.__init__(self, max_evaluations, 'cma cpp')
166
453
  self.popsize = popsize
167
- self.stop_fittness = stop_fittness
454
+ self.stop_fitness = stop_fitness
455
+ self.stop_hist = stop_hist
168
456
  self.guess = guess
457
+ self.sdevs = sdevs
458
+ self.update_gap = update_gap
459
+ self.delayed_update = delayed_update
460
+ self.normalize = normalize
461
+ self.workers = workers
169
462
 
170
- def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()), store=None):
171
- ret = cmaescpp.minimize(fun, bounds,
172
- self.guess if guess is None else guess,
173
- input_sigma=sdevs,
174
- max_evaluations = self.max_eval_num(store),
175
- popsize=self.popsize,
176
- stop_fittness = self.stop_fittness,
177
- rg=rg, runid = self.get_count_runs(store))
463
+ def minimize(self,
464
+ fun: Callable[[ArrayLike], float],
465
+ bounds: Optional[Bounds],
466
+ guess: Optional[ArrayLike] = None,
467
+ sdevs: Optional[Union[float, ArrayLike, Callable]] = 0.1,
468
+ rg=Generator(PCG64DXSM()),
469
+ store = None) -> Tuple[np.ndarray, float, int]:
470
+
471
+ ret = cmaescpp.minimize(fun, bounds,
472
+ self.guess if not self.guess is None else guess,
473
+ input_sigma = self.sdevs if not self.sdevs is None else sdevs,
474
+ max_evaluations =self.max_eval_num(store),
475
+ popsize = self.popsize,
476
+ stop_fitness = self.stop_fitness,
477
+ stop_hist = self.stop_hist,
478
+ rg = rg, runid = self.get_count_runs(store),
479
+ update_gap = self.update_gap,
480
+ normalize = self.normalize,
481
+ delayed_update = self.delayed_update,
482
+ workers = self.workers)
178
483
  return ret.x, ret.fun, ret.nfev
179
484
 
485
+ class Cma_orig(Optimizer):
486
+ """CMA_ES original implementation."""
487
+
488
+ def __init__(self,
489
+ max_evaluations: Optional[int] = 50000,
490
+ popsize: Optional[int] = 31,
491
+ guess: Optional[ArrayLike] = None,
492
+ stop_fitness: Optional[float] = -np.inf,
493
+ sdevs: Optional[float] = None):
494
+
495
+ Optimizer.__init__(self, max_evaluations, 'cma orig')
496
+ self.popsize = popsize
497
+ self.stop_fitness = stop_fitness
498
+ self.guess = guess
499
+ self.sdevs = sdevs
500
+
501
+ def minimize(self,
502
+ fun: Callable[[ArrayLike], float],
503
+ bounds: Optional[Bounds],
504
+ guess: Optional[ArrayLike] = None,
505
+ sdevs: Optional[Union[float, ArrayLike]] = 0.3,
506
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
507
+ store = None) -> Tuple[np.ndarray, float, int]:
508
+
509
+ lower = bounds.lb
510
+ upper = bounds.ub
511
+ guess = self.guess if not self.guess is None else guess
512
+ if guess is None:
513
+ guess = rg.uniform(lower, upper)
514
+ max_evaluations = self.max_eval_num(store)
515
+ input_sigma= self.sdevs if not self.sdevs is None else sdevs
516
+ try:
517
+ import cma
518
+ except ImportError as e:
519
+ raise ImportError("Please install CMA (pip install cma)")
520
+ try:
521
+ es = cma.CMAEvolutionStrategy(guess, 0.1, {'bounds': [lower, upper],
522
+ 'typical_x': guess,
523
+ 'scaling_of_variables': scale(lower, upper),
524
+ 'popsize': self.popsize,
525
+ 'CMA_stds': input_sigma,
526
+ 'verbose': -1,
527
+ 'verb_disp': -1})
528
+ evals = 0
529
+ for i in range(max_evaluations):
530
+ X, Y = es.ask_and_eval(fun)
531
+ es.tell(X, Y)
532
+ evals += self.popsize
533
+ if es.stop():
534
+ break
535
+ if evals > max_evaluations:
536
+ break
537
+ return es.result.xbest, es.result.fbest, evals
538
+ except Exception as ex:
539
+ print(ex)
540
+
541
+ class Cma_lw(Optimizer):
542
+ """CMA lightweight Python implementation. See https://github.com/CyberAgentAILab/cmaes """
543
+
544
+ def __init__(self,
545
+ max_evaluations: Optional[int] = 50000,
546
+ popsize: Optional[int] = 31,
547
+ guess: Optional[ArrayLike] = None,
548
+ stop_fitness: Optional[float] = -np.inf,
549
+ sdevs: Optional[Union[float, ArrayLike]] = None,
550
+ workers: Optional[int] = None):
551
+
552
+ Optimizer.__init__(self, max_evaluations, 'cma_lw')
553
+ self.popsize = popsize
554
+ self.stop_fitness = stop_fitness
555
+ self.guess = guess
556
+ self.sdevs = sdevs
557
+ self.workers = workers
558
+
559
+ def minimize(self,
560
+ fun: Callable[[ArrayLike], float],
561
+ bounds: Optional[Bounds],
562
+ guess: Optional[ArrayLike] = None,
563
+ sdevs: Optional[Union[float, ArrayLike]] = 0.3,
564
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
565
+ store = None) -> Tuple[np.ndarray, float, int]:
566
+
567
+ try:
568
+ import cmaes
569
+ except ImportError as e:
570
+ raise ImportError("Please install cmaes (pip install cmaes)")
571
+
572
+ if guess is None:
573
+ guess = self.guess
574
+ if guess is None:
575
+ guess = rg.uniform(bounds.lb, bounds.ub)
576
+ bds = np.array([t for t in zip(bounds.lb, bounds.ub)])
577
+ seed = int(rg.uniform(0, 2**32 - 1))
578
+ optimizer = cmaes.CMA(mean=guess, sigma=np.mean(sdevs), bounds=bds, seed=seed, population_size=self.popsize)
579
+ best_y = np.inf
580
+ evals = 0
581
+ fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
582
+ while evals < self.max_evaluations and not optimizer.should_stop():
583
+ xs = [optimizer.ask() for _ in range(optimizer.population_size)]
584
+ ys = fun(xs)
585
+ solutions = []
586
+ for i in range(optimizer.population_size):
587
+ x = xs[i]
588
+ y = ys[i]
589
+ solutions.append((x, y))
590
+ if y < best_y:
591
+ best_y = y
592
+ best_x = x
593
+ optimizer.tell(solutions)
594
+ evals += optimizer.population_size
595
+ if isinstance(fun, parallel):
596
+ fun.stop()
597
+ return best_x, best_y, evals
598
+
599
+ class Cma_awm(Optimizer):
600
+ """CMA awm Python implementation. See https://github.com/CyberAgentAILab/cmaes """
601
+
602
+ def __init__(self,
603
+ max_evaluations: Optional[int] = 50000,
604
+ popsize: Optional[int] = 31,
605
+ guess: Optional[ArrayLike] = None,
606
+ stop_fitness: Optional[float] = -np.inf,
607
+ sdevs: Optional[Union[float, ArrayLike]] = None,
608
+ continuous_space = None,
609
+ discrete_space = None,
610
+ workers: Optional[int] = None):
611
+
612
+ Optimizer.__init__(self, max_evaluations, 'cma_awm')
613
+ self.popsize = popsize
614
+ self.stop_fitness = stop_fitness
615
+ self.guess = guess
616
+ self.sdevs = sdevs
617
+ self.workers = workers
618
+ self.continuous_space = continuous_space
619
+ self.discrete_space = discrete_space
620
+
621
+ def minimize(self,
622
+ fun: Callable[[ArrayLike], float],
623
+ bounds: Optional[Bounds],
624
+ guess: Optional[ArrayLike] = None,
625
+ sdevs: Optional[Union[float, ArrayLike]] = 0.3,
626
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
627
+ store = None) -> Tuple[np.ndarray, float, int]:
628
+ try:
629
+ import cmaes
630
+ except ImportError as e:
631
+ raise ImportError("Please install cmaes (pip install cmaes)")
632
+
633
+ if guess is None:
634
+ guess = self.guess
635
+ if guess is None:
636
+ guess = rg.uniform(bounds.lb, bounds.ub)
637
+ seed = int(rg.uniform(0, 2**32 - 1))
638
+ optimizer = cmaes.CMAwM(mean=guess, sigma=np.mean(sdevs),
639
+ continuous_space=self.continuous_space,
640
+ discrete_space=self.discrete_space,
641
+ seed=seed, population_size=self.popsize)
642
+ best_y = 1E99
643
+ evals = 0
644
+ fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
645
+ while evals < self.max_evaluations and not optimizer.should_stop():
646
+ asks = [optimizer.ask() for _ in range(optimizer.population_size)]
647
+ ys = fun([x[0] for x in asks])
648
+ solutions = []
649
+ for i in range(optimizer.population_size):
650
+ x = asks[i][1]
651
+ y = ys[i]
652
+ solutions.append((x, y))
653
+ if y < best_y:
654
+ best_y = y
655
+ best_x = x
656
+ optimizer.tell(solutions)
657
+ evals += optimizer.population_size
658
+ if isinstance(fun, parallel):
659
+ fun.stop()
660
+ return best_x, best_y, evals
661
+
662
+ class Cma_sep(Optimizer):
663
+ """CMA sep Python implementation. See https://github.com/CyberAgentAILab/cmaes """
664
+
665
+ def __init__(self,
666
+ max_evaluations: Optional[int] = 50000,
667
+ popsize: Optional[int] = 31,
668
+ guess: Optional[ArrayLike] = None,
669
+ stop_fitness: Optional[float] = -np.inf,
670
+ sdevs: Optional[Union[float, ArrayLike]] = None,
671
+ workers: Optional[int] = None):
672
+
673
+ Optimizer.__init__(self, max_evaluations, 'cma_sep')
674
+ self.popsize = popsize
675
+ self.stop_fitness = stop_fitness
676
+ self.guess = guess
677
+ self.sdevs = sdevs
678
+ self.workers = workers
679
+
680
+ def minimize(self,
681
+ fun: Callable[[ArrayLike], float],
682
+ bounds: Optional[Bounds],
683
+ guess: Optional[ArrayLike] = None,
684
+ sdevs: Optional[Union[float, ArrayLike]] = 0.3,
685
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
686
+ store = None) -> Tuple[np.ndarray, float, int]:
687
+ try:
688
+ import cmaes
689
+ except ImportError as e:
690
+ raise ImportError("Please install cmaes (pip install cmaes)")
691
+
692
+ if guess is None:
693
+ guess = self.guess
694
+ if guess is None:
695
+ guess = rg.uniform(bounds.lb, bounds.ub)
696
+ bds = np.array([t for t in zip(bounds.lb, bounds.ub)])
697
+ seed = int(rg.uniform(0, 2**32 - 1))
698
+ optimizer = cmaes.SepCMA(mean=guess, sigma=np.mean(sdevs), bounds=bds, seed=seed, population_size=self.popsize)
699
+ best_y = np.inf
700
+ evals = 0
701
+ fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
702
+ while evals < self.max_evaluations and not optimizer.should_stop():
703
+ xs = [optimizer.ask() for _ in range(optimizer.population_size)]
704
+ ys = fun(xs)
705
+ solutions = []
706
+ for i in range(optimizer.population_size):
707
+ x = xs[i]
708
+ y = ys[i]
709
+ solutions.append((x, y))
710
+ if y < best_y:
711
+ best_y = y
712
+ best_x = x
713
+ optimizer.tell(solutions)
714
+ evals += optimizer.population_size
715
+ if isinstance(fun, parallel):
716
+ fun.stop()
717
+ return best_x, best_y, evals
718
+
180
719
  class De_cpp(Optimizer):
181
720
  """Differential Evolution C++ implementation."""
182
721
 
183
- def __init__(self, max_evaluations=50000,
184
- popsize = None, stop_fittness = None,
185
- keep = 200, f = 0.5, cr = 0.9):
722
+ def __init__(self,
723
+ max_evaluations: Optional[int] = 50000,
724
+ popsize: Optional[int] = None,
725
+ guess: Optional[ArrayLike] = None,
726
+ stop_fitness: Optional[float] = -np.inf,
727
+ keep: Optional[int] = 200,
728
+ f: Optional[float] = 0.5,
729
+ cr: Optional[float] = 0.9,
730
+ ints: Optional[ArrayLike] = None,
731
+ workers: Optional[int] = None):
732
+
186
733
  Optimizer.__init__(self, max_evaluations, 'de cpp')
187
734
  self.popsize = popsize
188
- self.stop_fittness = stop_fittness
735
+ self.guess = guess
736
+ self.stop_fitness = stop_fitness
189
737
  self.keep = keep
190
738
  self.f = f
191
739
  self.cr = cr
740
+ self.ints = ints
741
+ self.workers = workers
192
742
 
193
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
194
- ret = decpp.minimize(fun, len(bounds.lb), bounds,
743
+ def minimize(self,
744
+ fun: Callable[[ArrayLike], float],
745
+ bounds: Optional[Bounds],
746
+ guess: Optional[ArrayLike] = None,
747
+ sdevs: Optional[float] = None, # ignored
748
+ rg: Optional[Generator] = Generator(PCG64DXSM()),
749
+ store = None) -> Tuple[np.ndarray, float, int]:
750
+
751
+ if guess is None:
752
+ guess = self.guess
753
+
754
+ ret = decpp.minimize(fun, None, bounds,
195
755
  popsize=self.popsize,
196
756
  max_evaluations = self.max_eval_num(store),
197
- stop_fittness = self.stop_fittness,
198
- keep = self.keep, f = self.f, cr = self.cr,
199
- rg=rg, runid = self.get_count_runs(store))
757
+ stop_fitness = self.stop_fitness,
758
+ keep = self.keep, f = self.f, cr = self.cr, ints=self.ints,
759
+ rg=rg, runid = self.get_count_runs(store),
760
+ workers = self.workers, x0 = guess)
200
761
  return ret.x, ret.fun, ret.nfev
762
+
763
class De_python(Optimizer):
    """Differential Evolution, pure Python implementation (fcmaes.de).

    Adapts ``de.minimize`` to the common ``Optimizer`` interface.
    """

    def __init__(self,
                 max_evaluations: Optional[int] = 50000,
                 popsize: Optional[int] = None,
                 stop_fitness: Optional[float] = -np.inf,
                 keep: Optional[int] = 200,
                 f: Optional[float] = 0.5,
                 cr: Optional[float] = 0.9,
                 ints: Optional[ArrayLike] = None,
                 workers: Optional[int] = None):

        Optimizer.__init__(self, max_evaluations, 'de py')
        self.popsize = popsize            # population size; backend default if None
        self.stop_fitness = stop_fitness  # terminate when fitness <= this value
        self.keep = keep                  # keep parameter forwarded to the backend
        self.f = f                        # differential weight
        self.cr = cr                      # crossover probability
        self.ints = ints                  # mask marking integer decision variables
        self.workers = workers            # parallel fitness evaluation workers

    def minimize(self,
                 fun: Callable[[ArrayLike], float],
                 bounds: Optional[Bounds],
                 guess: Optional[ArrayLike] = None,  # ignored by this backend call
                 sdevs: Optional[float] = None,  # ignored
                 rg: Optional[Generator] = None,
                 store = None) -> Tuple[np.ndarray, float, int]:
        """Run the optimizer; returns ``(best_x, best_y, evaluations)``.

        Fix: replaced the shared mutable default ``rg=Generator(PCG64DXSM())``
        (one RNG instance reused across all calls) with per-call creation.
        """
        if rg is None:
            rg = Generator(PCG64DXSM())
        ret = de.minimize(fun, None,
                          bounds, self.popsize, self.max_eval_num(store),
                          stop_fitness = self.stop_fitness,
                          keep = self.keep, f = self.f, cr = self.cr, ints = self.ints,
                          rg = rg, workers = self.workers)
        return ret.x, ret.fun, ret.nfev
799
+
800
class Cma_ask_tell(Optimizer):
    """CMA-ES driven through the ask/tell interface (fcmaes.cmaes.Cmaes)."""

    def __init__(self, max_evaluations=50000,
                 popsize = 31, guess=None, stop_fitness = -np.inf, sdevs = None):
        Optimizer.__init__(self, max_evaluations, 'cma at')
        self.popsize = popsize
        # NOTE(review): stop_fitness is stored but never checked by the
        # ask/tell loop below — confirm whether it should terminate the loop.
        self.stop_fitness = stop_fitness
        self.guess = guess    # stored but unused by minimize() below
        self.sdevs = sdevs    # overrides the sdevs argument of minimize() when set

    def minimize(self,
                 fun: Callable[[ArrayLike], float],
                 bounds: Optional[Bounds],
                 guess: Optional[ArrayLike] = None,
                 sdevs: Optional[float] = None,  # fallback when self.sdevs is None
                 rg: Optional[Generator] = None,
                 store = None) -> Tuple[np.ndarray, float, int]:
        """Run CMA-ES via ask/tell; returns ``(best_x, best_y, evaluations)``.

        Fixes: the ``# ignored`` comment on ``sdevs`` was wrong (it is used as
        a fallback for ``self.sdevs``); replaced the shared mutable RNG
        default with per-call creation; ``not x is None`` -> ``x is not None``.
        """
        if rg is None:
            rg = Generator(PCG64DXSM())
        es = cmaes.Cmaes(bounds,
                         popsize = self.popsize,
                         input_sigma = self.sdevs if self.sdevs is not None else sdevs,
                         rg = rg)
        iters = self.max_eval_num(store) // self.popsize
        evals = 0
        for _ in range(iters):
            xs = es.ask()
            ys = [fun(x) for x in xs]
            evals += len(xs)
            # non-zero tell() result signals an internal stop condition
            if es.tell(ys) != 0:
                break
        return es.best_x, es.best_value, evals
833
+
834
class De_ask_tell(Optimizer):
    """Differential Evolution driven through the ask/tell interface (fcmaes.de.DE)."""

    def __init__(self,
                 max_evaluations: Optional[int] = 50000,
                 popsize: Optional[int] = None,
                 stop_fitness: Optional[float] = -np.inf,
                 keep: Optional[int] = 200,
                 f: Optional[float] = 0.5,
                 cr: Optional[float] = 0.9):
        Optimizer.__init__(self, max_evaluations, 'de at')
        self.popsize = popsize
        # NOTE(review): stop_fitness is stored but never checked by the
        # ask/tell loop below — confirm whether it should terminate the loop.
        self.stop_fitness = stop_fitness
        self.keep = keep
        self.f = f
        self.cr = cr

    def minimize(self,
                 fun: Callable[[ArrayLike], float],
                 bounds: Optional[Bounds],
                 guess: Optional[ArrayLike] = None,  # unused here
                 sdevs: Optional[float] = None,  # ignored
                 rg: Optional[Generator] = None,
                 store = None) -> Tuple[np.ndarray, float, int]:
        """Run DE via ask/tell; returns ``(best_x, best_y, evaluations)``.

        Fixes: dropped the debug assignment ``es.fun = fun`` that the author
        had flagged with ``#remove`` (objective values are passed explicitly
        to ``tell``); replaced the shared mutable RNG default with per-call
        creation.
        """
        if rg is None:
            rg = Generator(PCG64DXSM())
        dim = len(bounds.lb)
        popsize = 31 if self.popsize is None else self.popsize
        es = de.DE(dim, bounds, popsize = popsize, rg = rg,
                   keep = self.keep, F = self.f, Cr = self.cr)
        max_evals = self.max_eval_num(store)
        while es.evals < max_evals:
            xs = es.ask()
            ys = [fun(x) for x in xs]
            # non-zero tell() result signals an internal stop condition
            if es.tell(ys, xs) != 0:
                break
        return es.best_x, es.best_value, es.evals
871
+
872
class random_search(Optimizer):
    """Uniform random search over the box bounds (baseline optimizer)."""

    def __init__(self, max_evaluations=50000):
        Optimizer.__init__(self, max_evaluations, 'random')

    def minimize(self,
                 fun: Callable[[ArrayLike], float],
                 bounds: Optional[Bounds],
                 guess: Optional[ArrayLike] = None,
                 sdevs: Optional[float] = None,  # ignored
                 rg: Optional[Generator] = Generator(PCG64DXSM()),
                 store = None) -> Tuple[np.ndarray, float, int]:
        """Sample uniformly within bounds, keeping the best point found.

        Returns ``(best_x, best_y, evaluations)``.
        """
        dim = len(bounds.lb)
        best_x, best_y = None, None
        # draw candidates in chunks so the sample matrix stays bounded in size
        chunk_limit = 1 + 4e4 / dim
        evals = self.max_eval_num(store)
        remaining = evals
        while remaining > 0:
            n = int(max([1, min([remaining, chunk_limit])]))
            xs = rg.uniform(bounds.lb, bounds.ub, size = [n, dim])
            ys = [fun(x) for x in xs]
            idx = np.argmin(ys) if len(ys) else None
            if idx is not None and (best_y is None or ys[idx] < best_y):
                best_x, best_y = xs[idx], ys[idx]
            remaining -= n
        return best_x, best_y, evals
899
+
201
900
 
202
901
  class Da_cpp(Optimizer):
203
902
  """Dual Annealing C++ implementation."""
204
903
 
205
- def __init__(self, max_evaluations=50000,
206
- stop_fittness = None, use_local_search=True, guess = None):
904
+ def __init__(self,
905
+ max_evaluations: Optional[int] = 50000,
906
+ stop_fitness: Optional[float] = -np.inf,
907
+ use_local_search: Optional[bool] = True,
908
+ guess: Optional[ArrayLike] = None):
909
+
207
910
  Optimizer.__init__(self, max_evaluations, 'da cpp',)
208
- self.stop_fittness = stop_fittness
911
+ self.stop_fitness = stop_fitness
209
912
  self.use_local_search = use_local_search
210
913
  self.guess = guess
211
914
 
212
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
915
+ def minimize(self,
916
+ fun: Callable[[ArrayLike], float],
917
+ bounds: Optional[Bounds],
918
+ guess: Optional[ArrayLike] = None,
919
+ sdevs: Optional[float] = None, # ignored
920
+ rg=Generator(PCG64DXSM()),
921
+ store = None) -> Tuple[np.ndarray, float, int]:
922
+
213
923
  ret = dacpp.minimize(fun, bounds,
214
924
  self.guess if guess is None else guess,
215
925
  max_evaluations = self.max_eval_num(store),
@@ -217,32 +927,57 @@ class Da_cpp(Optimizer):
217
927
  rg=rg, runid = self.get_count_runs(store))
218
928
  return ret.x, ret.fun, ret.nfev
219
929
 
220
- class Hh_cpp(Optimizer):
221
- """Harris hawks C++ implementation."""
222
-
223
- def __init__(self, max_evaluations=50000,
224
- popsize = 31, stop_fittness = None):
225
- Optimizer.__init__(self, max_evaluations, 'hh cpp')
226
- self.popsize = popsize
227
- self.stop_fittness = stop_fittness
930
class Bite_cpp(Optimizer):
    """BiteOpt, C++ implementation (fcmaes.bitecpp).

    Adapts ``bitecpp.minimize`` to the common ``Optimizer`` interface.
    """

    def __init__(self,
                 max_evaluations: Optional[int] = 50000,
                 guess: Optional[ArrayLike] = None,
                 stop_fitness: Optional[float] = -np.inf,
                 M: Optional[int] = None,
                 popsize: Optional[int] = None,
                 stall_criterion: Optional[int] = None):

        Optimizer.__init__(self, max_evaluations, 'bite cpp')
        self.guess = guess                # fallback initial point if minimize() gets none
        self.stop_fitness = stop_fitness  # terminate when fitness <= this value
        # None means "let the backend choose": 0 / 1 are its sentinel defaults.
        self.M = 1 if M is None else M
        self.popsize = 0 if popsize is None else popsize
        self.stall_criterion = 0 if stall_criterion is None else stall_criterion

    def minimize(self,
                 fun: Callable[[ArrayLike], float],
                 bounds: Optional[Bounds],
                 guess: Optional[ArrayLike] = None,
                 sdevs: Optional[float] = None,  # ignored
                 rg: Optional[Generator] = None,
                 store = None) -> Tuple[np.ndarray, float, int]:
        """Run the optimizer; returns ``(best_x, best_y, evaluations)``.

        Fix: replaced the shared mutable default ``rg=Generator(PCG64DXSM())``
        (one RNG instance reused across all calls) with per-call creation.
        """
        if rg is None:
            rg = Generator(PCG64DXSM())
        ret = bitecpp.minimize(fun, bounds,
                               self.guess if guess is None else guess,
                               max_evaluations = self.max_eval_num(store),
                               stop_fitness = self.stop_fitness, M = self.M,
                               popsize = self.popsize,
                               stall_criterion = self.stall_criterion,
                               rg = rg, runid = self.get_count_runs(store))
        return ret.x, ret.fun, ret.nfev
236
963
 
237
964
  class Dual_annealing(Optimizer):
238
965
  """scipy dual_annealing."""
239
966
 
240
- def __init__(self, max_evaluations=50000,
241
- rg=Generator(MT19937()), use_local_search=True):
967
+ def __init__(self,
968
+ max_evaluations: Optional[int] = 50000,
969
+ use_local_search: Optional[bool] = True):
970
+
242
971
  Optimizer.__init__(self, max_evaluations, 'scipy da')
243
972
  self.no_local_search = not use_local_search
244
973
 
245
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
974
+ def minimize(self,
975
+ fun: Callable[[ArrayLike], float],
976
+ bounds: Optional[Bounds],
977
+ guess: Optional[ArrayLike] = None,
978
+ sdevs: Optional[float] = None, # ignored
979
+ rg=Generator(PCG64DXSM()),
980
+ store = None) -> Tuple[np.ndarray, float, int]:
246
981
  ret = dual_annealing(fun, bounds=list(zip(bounds.lb, bounds.ub)),
247
982
  maxfun = self.max_eval_num(store),
248
983
  no_local_search = self.no_local_search,
@@ -253,12 +988,21 @@ class Dual_annealing(Optimizer):
253
988
  class Differential_evolution(Optimizer):
254
989
  """scipy differential_evolution."""
255
990
 
256
- def __init__(self, max_evaluations=50000, store=None,
257
- popsize = 15):
991
+ def __init__(self,
992
+ max_evaluations: Optional[int] = 50000,
993
+ popsize: Optional[int] = 31):
994
+
258
995
  Optimizer.__init__(self, max_evaluations, 'scipy de')
259
996
  self.popsize = popsize
260
997
 
261
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
998
+ def minimize(self,
999
+ fun: Callable[[ArrayLike], float],
1000
+ bounds: Optional[Bounds],
1001
+ guess: Optional[ArrayLike] = None,
1002
+ sdevs: Optional[float] = None, # ignored
1003
+ rg=Generator(PCG64DXSM()),
1004
+ store = None) -> Tuple[np.ndarray, float, int]:
1005
+
262
1006
  popsize = self.popsize
263
1007
  maxiter = int(self.max_eval_num(store) / (popsize * len(bounds.lb)) - 1)
264
1008
  ret = differential_evolution(fun, bounds=bounds, maxiter=maxiter,
@@ -267,7 +1011,7 @@ class Differential_evolution(Optimizer):
267
1011
 
268
1012
  class CheckBounds(object):
269
1013
 
270
- def __init__(self, bounds):
1014
+ def __init__(self, bounds: Bounds):
271
1015
  self.bounds = bounds
272
1016
 
273
1017
  def __call__(self, **kwargs):
@@ -282,7 +1026,13 @@ class Basin_hopping(Optimizer):
282
1026
  def __init__(self, max_evaluations=50000, store=None):
283
1027
  Optimizer.__init__(self, max_evaluations, 'scipy basin hopping')
284
1028
 
285
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
1029
+ def minimize(self,
1030
+ fun: Callable[[ArrayLike], float],
1031
+ bounds: Optional[Bounds],
1032
+ guess: Optional[ArrayLike] = None,
1033
+ sdevs: Optional[float] = None, # ignored
1034
+ rg=Generator(PCG64DXSM()),
1035
+ store = None) -> Tuple[np.ndarray, float, int]:
286
1036
  localevals = 200
287
1037
  maxiter = int(self.max_eval_num(store) / localevals)
288
1038
  if guess is None:
@@ -301,7 +1051,14 @@ class Minimize(Optimizer):
301
1051
  def __init__(self, max_evaluations=50000, store=None):
302
1052
  Optimizer.__init__(self, max_evaluations, 'scipy minimize')
303
1053
 
304
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
1054
+ def minimize(self,
1055
+ fun: Callable[[ArrayLike], float],
1056
+ bounds: Optional[Bounds],
1057
+ guess: Optional[ArrayLike] = None,
1058
+ sdevs: Optional[float] = None, # ignored
1059
+ rg=Generator(PCG64DXSM()),
1060
+ store = None) -> Tuple[np.ndarray, float, int]:
1061
+
305
1062
  if guess is None:
306
1063
  guess = rg.uniform(bounds.lb, bounds.ub)
307
1064
  ret = minimize(fun, x0=guess, bounds=bounds)
@@ -313,7 +1070,14 @@ class Shgo(Optimizer):
313
1070
  def __init__(self, max_evaluations=50000, store=None):
314
1071
  Optimizer.__init__(self, max_evaluations, 'scipy shgo')
315
1072
 
316
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
1073
+ def minimize(self,
1074
+ fun: Callable[[ArrayLike], float],
1075
+ bounds: Optional[Bounds],
1076
+ guess: Optional[ArrayLike] = None,
1077
+ sdevs: Optional[float] = None, # ignored
1078
+ rg=Generator(PCG64DXSM()),
1079
+ store = None) -> Tuple[np.ndarray, float, int]:
1080
+
317
1081
  ret = shgo(fun, bounds=list(zip(bounds.lb, bounds.ub)),
318
1082
  options={'maxfev': self.max_eval_num(store)})
319
1083
  return ret.x, ret.fun, ret.nfev
@@ -341,7 +1105,14 @@ class NLopt(Optimizer):
341
1105
  Optimizer.__init__(self, max_evaluations, 'NLopt ' + algo.get_algorithm_name())
342
1106
  self.algo = algo
343
1107
 
344
- def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()), store=None):
1108
+ def minimize(self,
1109
+ fun: Callable[[ArrayLike], float],
1110
+ bounds: Optional[Bounds],
1111
+ guess: Optional[ArrayLike] = None,
1112
+ sdevs: Optional[float] = None, # ignored
1113
+ rg=Generator(PCG64DXSM()),
1114
+ store = None) -> Tuple[np.ndarray, float, int]:
1115
+
345
1116
  self.fun = fun
346
1117
  opt = self.algo
347
1118
  opt.set_min_objective(self.nlfunc)
@@ -360,4 +1131,4 @@ class NLopt(Optimizer):
360
1131
  return self.fun(x)
361
1132
  except Exception as ex:
362
1133
  return sys.float_info.max
363
-
1134
+