fcmaes-1.3.17-py3-none-any.whl → fcmaes-1.6.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fcmaes/__init__.py +5 -4
- fcmaes/advretry.py +135 -141
- fcmaes/astro.py +64 -40
- fcmaes/bitecpp.py +33 -32
- fcmaes/cmaes.py +69 -142
- fcmaes/cmaescpp.py +231 -39
- fcmaes/crfmnes.py +339 -0
- fcmaes/crfmnescpp.py +273 -0
- fcmaes/dacpp.py +26 -27
- fcmaes/de.py +163 -56
- fcmaes/decpp.py +188 -179
- fcmaes/diversifier.py +357 -0
- fcmaes/evaluator.py +279 -6
- fcmaes/lib/libacmalib.dll +0 -0
- fcmaes/lib/libacmalib.dylib +0 -0
- fcmaes/lib/libacmalib.so +0 -0
- fcmaes/lib/libhbv.so +0 -0
- fcmaes/lib/liblrgv.so +0 -0
- fcmaes/mapelites.py +737 -0
- fcmaes/mode.py +399 -256
- fcmaes/modecpp.py +326 -149
- fcmaes/moretry.py +107 -77
- fcmaes/multiretry.py +37 -30
- fcmaes/optimizer.py +695 -271
- fcmaes/pgpecpp.py +340 -0
- fcmaes/pygmoretry.py +8 -17
- fcmaes/retry.py +161 -139
- fcmaes/test_cma.py +45 -25
- fcmaes-1.6.9.dist-info/METADATA +47 -0
- fcmaes-1.6.9.dist-info/RECORD +36 -0
- {fcmaes-1.3.17.dist-info → fcmaes-1.6.9.dist-info}/WHEEL +1 -1
- fcmaes/csmacpp.py +0 -108
- fcmaes/gcldecpp.py +0 -148
- fcmaes/lcldecpp.py +0 -138
- fcmaes/ldecpp.py +0 -172
- fcmaes/lib/libgcc_s_seh-1.dll +0 -0
- fcmaes/lib/libgtoplib.dll +0 -0
- fcmaes/lib/libgtoplib.so +0 -0
- fcmaes/lib/libstdc++-6.dll +0 -0
- fcmaes/lib/libwinpthread-1.dll +0 -0
- fcmaes-1.3.17.dist-info/METADATA +0 -55
- fcmaes-1.3.17.dist-info/RECORD +0 -37
- {fcmaes-1.3.17.dist-info → fcmaes-1.6.9.dist-info}/LICENSE +0 -0
- {fcmaes-1.3.17.dist-info → fcmaes-1.6.9.dist-info}/top_level.txt +0 -0
fcmaes/optimizer.py
CHANGED
@@ -4,93 +4,88 @@
 # LICENSE file in the root directory.
 
 import numpy as np
-from numpy.random import
+from numpy.random import PCG64DXSM, Generator
 from scipy.optimize import Bounds, minimize, shgo, differential_evolution, dual_annealing, basinhopping
 import sys
 import time
-import
-import logging
+from loguru import logger
 import ctypes as ct
 import multiprocessing as mp
+from fcmaes.evaluator import serial, parallel
+from fcmaes import crfmnes, crfmnescpp, pgpecpp, cmaes, de, cmaescpp, decpp, dacpp, bitecpp
 
-from
-
-_logger = None
-
-def logger(logfile = 'optimizer.log'):
-    '''default logger used by the parallel retry. Logs both to stdout and into a file.'''
-    global _logger
-    if _logger is None:
-        formatter = logging.Formatter('%(message)s')
-        file_handler = logging.FileHandler(filename=logfile)
-        file_handler.setLevel(logging.INFO)
-        stdout_handler = logging.StreamHandler(sys.stdout)
-        stdout_handler.setLevel(logging.INFO)
-        file_handler.setFormatter(formatter)
-        stdout_handler.setFormatter(formatter)
-        _logger = logging.getLogger('optimizer')
-        _logger.addHandler(file_handler)
-        _logger.addHandler(stdout_handler)
-        _logger.setLevel(logging.INFO)
-    return _logger
+from typing import Optional, Callable, Tuple, Union
+from numpy.typing import ArrayLike
 
 def eprint(*args, **kwargs):
     """print message to stderr."""
     print(*args, file=sys.stderr, **kwargs)
 
-def scale(lower,
+def scale(lower: ArrayLike,
+          upper: ArrayLike) -> np.ndarray:
     """scaling = 0.5 * difference of the bounds."""
     return 0.5 * (np.asarray(upper) - np.asarray(lower))
 
-def typical(lower,
+def typical(lower: ArrayLike,
+            upper: ArrayLike) -> np.ndarray:
     """typical value = mean of the bounds."""
     return 0.5 * (np.asarray(upper) + np.asarray(lower))
 
-def fitting(guess
+def fitting(guess: ArrayLike,
+            lower: ArrayLike,
+            upper: ArrayLike) -> np.ndarray:
     """fit a guess into the bounds."""
-    return np.
+    return np.clip(np.asarray(guess), np.asarray(upper), np.asarray(lower))
 
-def is_terminate(runid
+def is_terminate(runid: int,
+                 iterations: int,
+                 val: float) -> bool:
     """dummy is_terminate call back."""
     return False
 
-def random_x(lower, upper):
+def random_x(lower: ArrayLike, upper: ArrayLike) -> np.ndarray:
     """feasible random value uniformly distributed inside the bounds."""
     lower = np.asarray(lower)
     upper = np.asarray(upper)
     return lower + np.multiply(upper - lower, np.random.rand(lower.size))
 
-def dtime(t0):
+def dtime(t0: float) -> float:
     """time since t0."""
     return round(time.perf_counter() - t0, 2)
 
 class wrapper(object):
     """Fitness function wrapper for use with parallel retry."""
 
-    def __init__(self,
+    def __init__(self,
+                 fit: Callable[[ArrayLike], float]):
         self.fit = fit
         self.evals = mp.RawValue(ct.c_int, 0)
-        self.best_y = mp.RawValue(ct.c_double,
+        self.best_y = mp.RawValue(ct.c_double, np.inf)
         self.t0 = time.perf_counter()
-        self.logger = logger
-
-    def __call__(self, x):
-        self.evals.value += 1
-        y = self.fit(x)
-        y0 = y if np.isscalar(y) else sum(y)
-        if y0 < self.best_y.value:
-            self.best_y.value = y0
-            if not self.logger is None:
-                self.logger.info(str(dtime(self.t0)) + ' ' +
-                    str(self.evals.value) + ' ' +
-                    str(self.best_y.value) + ' ' +
-                    str(list(x)))
-        return y
 
+    def __call__(self, x: ArrayLike) -> float:
+        try:
+            self.evals.value += 1
+            y = self.fit(x)
+            y0 = y if np.isscalar(y) else sum(y)
+            if y0 < self.best_y.value:
+                self.best_y.value = y0
+                logger.info(str(dtime(self.t0)) + ' ' +
+                            str(self.evals.value) + ' ' +
+                            str(round(self.evals.value/(1E-9 + dtime(self.t0)),0)) + ' ' +
+                            str(self.best_y.value) + ' ' +
+                            str(list(x)))
+            return y
+        except Exception as ex:
+            print(str(ex))
+            return sys.float_info.max
+
 class Optimizer(object):
     """Provides different optimization methods for use with parallel retry."""
 
-    def __init__(self,
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 name: Optional[str] = ''):
         self.max_evaluations = max_evaluations
         self.name = name
 
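For orientation, the rewritten wrapper above now logs improvements through loguru instead of the removed logging-based logger() helper. A minimal usage sketch, assuming only what this hunk itself shows; the Rosenbrock objective and the 4-dimensional bounds are illustrative, not part of the package:

import numpy as np
from fcmaes.optimizer import wrapper

def rosen(x):
    # illustrative test objective, not from fcmaes
    x = np.asarray(x)
    return float(np.sum(100.0*(x[1:] - x[:-1]**2)**2 + (1.0 - x[:-1])**2))

fit = wrapper(rosen)      # counts evaluations in shared memory and logs every new best value
y = fit(np.zeros(4))      # each call increments fit.evals and may update fit.best_y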
@@ -105,7 +100,7 @@ class Optimizer(object):
 class Sequence(Optimizer):
     """Sequence of optimizers."""
 
-    def __init__(self, optimizers):
+    def __init__(self, optimizers: ArrayLike):
         Optimizer.__init__(self)
         self.optimizers = optimizers
         self.max_evaluations = 0
@@ -114,9 +109,15 @@ class Sequence(Optimizer):
             self.max_evaluations += optimizer.max_evaluations
         self.name = self.name[:-4]
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Bounds,
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike, Callable]] = None,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store=None) -> Tuple[np.ndarray, float, int]:
         evals = 0
-        y =
+        y = np.inf
         for optimizer in self.optimizers:
             ret = optimizer.minimize(fun, bounds, guess, sdevs, rg, store)
             if ret[1] < y:
@@ -129,7 +130,7 @@ class Sequence(Optimizer):
 class Choice(Optimizer):
     """Random choice of optimizers."""
 
-    def __init__(self, optimizers):
+    def __init__(self, optimizers: ArrayLike):
         Optimizer.__init__(self)
         self.optimizers = optimizers
         self.max_evaluations = optimizers[0].max_evaluations
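Choice, like Sequence, is meant to be handed to the parallel retry as a single optimizer. A short sketch under the assumption that the constructors keep the defaults shown in this diff; the evaluation budgets are illustrative:

from fcmaes.optimizer import Choice, Cma_cpp, Bite_cpp

# each retry run picks one of the two solvers at random
opt = Choice([Cma_cpp(max_evaluations=10000), Bite_cpp(max_evaluations=10000)])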
@@ -137,107 +138,290 @@ class Choice(Optimizer):
             self.name += optimizer.name + ' | '
         self.name = self.name[:-3]
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Bounds,
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike, Callable]] = None,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store=None) -> Tuple[np.ndarray, float, int]:
+
         choice = rg.integers(0, len(self.optimizers))
         opt = self.optimizers[choice]
         return opt.minimize(fun, bounds, guess, sdevs, rg, store)
 
-def de_cma(max_evaluations = 50000,
-
+def de_cma(max_evaluations: Optional[int] = 50000,
+           popsize: Optional[int] = 31,
+           stop_fitness: Optional[float] = -np.inf,
+           de_max_evals: Optional[int] = None,
+           cma_max_evals: Optional[int] = None,
+           ints: Optional[ArrayLike] = None,
+           workers: Optional[int] = None) -> Sequence:
     """Sequence differential evolution -> CMA-ES."""
 
-
+    de_evals = np.random.uniform(0.1, 0.5)
     if de_max_evals is None:
-        de_max_evals = int(
+        de_max_evals = int(de_evals*max_evaluations)
     if cma_max_evals is None:
-        cma_max_evals = int((1.0-
-    opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals,
+        cma_max_evals = int((1.0-de_evals)*max_evaluations)
+    opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals,
+                  stop_fitness = stop_fitness, ints=ints, workers = workers)
     opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
-                   stop_fitness = stop_fitness)
+                   stop_fitness = stop_fitness, workers = workers)
     return Sequence([opt1, opt2])
 
-def de_cma_py(max_evaluations = 50000,
-
+def de_cma_py(max_evaluations: Optional[int] = 50000,
+              popsize: Optional[int] = 31,
+              stop_fitness: Optional[float] = -np.inf,
+              de_max_evals: Optional[int] = None,
+              cma_max_evals: Optional[int] = None,
+              ints: Optional[ArrayLike] = None,
+              workers: Optional[int] = None) -> Sequence:
     """Sequence differential evolution -> CMA-ES in python."""
 
-
+    de_evals = np.random.uniform(0.1, 0.5)
     if de_max_evals is None:
-        de_max_evals = int(
+        de_max_evals = int(de_evals*max_evaluations)
     if cma_max_evals is None:
-        cma_max_evals = int((1.0-
-    opt1 = De_python(popsize=popsize, max_evaluations = de_max_evals,
+        cma_max_evals = int((1.0-de_evals)*max_evaluations)
+    opt1 = De_python(popsize=popsize, max_evaluations = de_max_evals,
+                     stop_fitness = stop_fitness, ints=ints, workers = workers)
     opt2 = Cma_python(popsize=popsize, max_evaluations = cma_max_evals,
-                      stop_fitness = stop_fitness)
+                      stop_fitness = stop_fitness, workers = workers)
     return Sequence([opt1, opt2])
 
-def
-
-
+def da_cma(max_evaluations: Optional[int] = 50000,
+           popsize: Optional[int] = 31,
+           da_max_evals: Optional[int] = None,
+           cma_max_evals: Optional[int] = None,
+           stop_fitness: Optional[float] = -np.inf) -> Sequence:
+    """Sequence dual annealing -> CMA-ES."""
 
-
-    if
-
+    da_evals = np.random.uniform(0.1, 0.5)
+    if da_max_evals is None:
+        da_max_evals = int(da_evals*max_evaluations)
     if cma_max_evals is None:
-        cma_max_evals = int((1.0-
-    opt1 =
-    opt2 = Cma_cpp(
+        cma_max_evals = int((1.0-da_evals)*max_evaluations)
+    opt1 = Da_cpp(max_evaluations = da_max_evals, stop_fitness = stop_fitness)
+    opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
+                   stop_fitness = stop_fitness)
     return Sequence([opt1, opt2])
 
-def
-
-
+def de_crfmnes(max_evaluations: Optional[int] = 50000,
+               popsize: Optional[int] = 32,
+               stop_fitness: Optional[float] = -np.inf,
+               de_max_evals: Optional[int] = None,
+               crfm_max_evals: Optional[int] = None,
+               ints: Optional[ArrayLike] = None,
+               workers: Optional[int] = None) -> Sequence:
+    """Sequence differential evolution -> CRFMNES."""
 
-
+    de_evals = np.random.uniform(0.1, 0.5)
     if de_max_evals is None:
-        de_max_evals = int(
-    if
-
-    opt1 =
-
+        de_max_evals = int(de_evals*max_evaluations)
+    if crfm_max_evals is None:
+        crfm_max_evals = int((1.0-de_evals)*max_evaluations)
+    opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals,
+                  stop_fitness = stop_fitness, ints=ints, workers = workers)
+    opt2 = Crfmnes_cpp(popsize=popsize, max_evaluations = crfm_max_evals,
+                       stop_fitness = stop_fitness, workers = workers)
     return Sequence([opt1, opt2])
 
-def
-
-
+def crfmnes_bite(max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 stop_fitness: Optional[float] = -np.inf,
+                 crfm_max_evals: Optional[int] = None,
+                 bite_max_evals: Optional[int] = None,
+                 M: Optional[int] = 1) -> Sequence:
+    """Sequence CRFMNES -> Bite."""
 
-
-    if
-
+    crfmnes_evals = np.random.uniform(0.1, 0.5)
+    if crfm_max_evals is None:
+        crfm_max_evals = int(crfmnes_evals*max_evaluations)
+    if bite_max_evals is None:
+        bite_max_evals = int((1.0-crfmnes_evals)*max_evaluations)
+    opt1 = Crfmnes_cpp(popsize=popsize, max_evaluations = crfm_max_evals,
+                       stop_fitness = stop_fitness)
+    opt2 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
+                    stop_fitness = stop_fitness, M=M)
+    return Sequence([opt1, opt2])
+
+def bite_cma(max_evaluations: Optional[int] = 50000,
+             popsize: Optional[int] = 31,
+             stop_fitness: Optional[float] = -np.inf,
+             bite_max_evals: Optional[int] = None,
+             cma_max_evals: Optional[int] = None,
+             M: Optional[int] = 1) -> Sequence:
+    """Sequence Bite -> CMA-ES."""
+
+    bite_evals = np.random.uniform(0.1, 0.5)
+    if bite_max_evals is None:
+        bite_max_evals = int(bite_evals*max_evaluations)
     if cma_max_evals is None:
-        cma_max_evals = int((1.0-
-    opt1 =
+        cma_max_evals = int((1.0-bite_evals)*max_evaluations)
+    opt1 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
+                    stop_fitness = stop_fitness, M=M)
     opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
-
+                   stop_fitness = stop_fitness)
     return Sequence([opt1, opt2])
 
-def
-
-
+def cma_bite(max_evaluations: Optional[int] = 50000,
+             popsize: Optional[int] = 32,
+             stop_fitness: Optional[float] = -np.inf,
+             cma_max_evals: Optional[int] = None,
+             bite_max_evals: Optional[int] = None,
+             M: Optional[int] = 1) -> Sequence:
+    """Sequence CMA-ES -> Bite."""
 
-
-    if da_max_evals is None:
-        da_max_evals = int(daEvals*max_evaluations)
+    cma_evals = np.random.uniform(0.1, 0.5)
     if cma_max_evals is None:
-        cma_max_evals = int(
-
-
-
+        cma_max_evals = int(cma_evals*max_evaluations)
+    if bite_max_evals is None:
+        bite_max_evals = int((1.0-cma_evals)*max_evaluations)
+    opt1 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
+                   stop_fitness = stop_fitness, stop_hist = 0)
+    opt2 = Bite_cpp(popsize=popsize, max_evaluations = bite_max_evals,
+                    stop_fitness = stop_fitness, M=M)
     return Sequence([opt1, opt2])
 
+class Crfmnes(Optimizer):
+    """CRFMNES Python implementation."""
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 32,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'crfmnes')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
+        ret = crfmnes.minimize(fun, bounds,
+                               self.guess if not self.guess is None else guess,
+                               input_sigma = self.sdevs if not self.sdevs is None else sdevs,
+                               max_evaluations = self.max_eval_num(store),
+                               popsize=self.popsize,
+                               stop_fitness = self.stop_fitness,
+                               rg=rg, runid=self.get_count_runs(store),
+                               workers = self.workers)
+        return ret.x, ret.fun, ret.nfev
+
+class Crfmnes_cpp(Optimizer):
+    """CRFMNES C++ implementation."""
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 32,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'crfmnes cpp')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
+        ret = crfmnescpp.minimize(fun, bounds,
+                                  self.guess if not self.guess is None else guess,
+                                  input_sigma = self.sdevs if not self.sdevs is None else sdevs,
+                                  max_evaluations = self.max_eval_num(store),
+                                  popsize=self.popsize,
+                                  stop_fitness = self.stop_fitness,
+                                  rg=rg, runid=self.get_count_runs(store),
+                                  workers = self.workers)
+        return ret.x, ret.fun, ret.nfev
+
+class Pgpe_cpp(Optimizer):
+    """PGPE C++ implementation."""
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 500000,
+                 popsize: Optional[int] = 640,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'pgpe cpp')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = 0.1,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
+        ret = pgpecpp.minimize(fun, bounds,
+                               self.guess if not self.guess is None else guess,
+                               input_sigma = self.sdevs if not self.sdevs is None else sdevs,
+                               max_evaluations = self.max_eval_num(store),
+                               popsize=self.popsize,
+                               stop_fitness = self.stop_fitness,
+                               rg=rg, runid=self.get_count_runs(store),
+                               workers = self.workers)
+        return ret.x, ret.fun, ret.nfev
+
 class Cma_python(Optimizer):
     """CMA_ES Python implementation."""
 
-    def __init__(self,
-
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None,
+                 workers: Optional[int] = None,
+                 update_gap: Optional[int] = None,
+                 normalize: Optional[bool] = True):
+
         Optimizer.__init__(self, max_evaluations, 'cma py')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
         self.update_gap = update_gap
         self.guess = guess
         self.sdevs = sdevs
+        self.normalize = normalize
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike, Callable]] = 0.1,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
 
-    def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()), store=None):
         ret = cmaes.minimize(fun, bounds,
                              self.guess if not self.guess is None else guess,
                              input_sigma= self.sdevs if not self.sdevs is None else sdevs,
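The factory functions above (de_cma, de_cma_py, da_cma, de_crfmnes, crfmnes_bite, bite_cma, cma_bite) each return a Sequence that splits one evaluation budget randomly between two solvers. A sketch of the intended use with the parallel retry, assuming retry.minimize keeps its documented fun/bounds/num_retries/optimizer keywords; the objective and budgets are illustrative:

import numpy as np
from scipy.optimize import Bounds
from fcmaes import retry
from fcmaes.optimizer import de_cma, wrapper

def elli(x):
    # illustrative ill-conditioned test objective
    x = np.asarray(x)
    n = len(x)
    return float(np.sum(1e3 ** (np.arange(n) / (n - 1)) * x * x))

bounds = Bounds([-10.0]*6, [10.0]*6)
# de_cma: a DE run followed by CMA-ES, sharing the 20000-evaluation budget per retry
ret = retry.minimize(wrapper(elli), bounds,
                     optimizer=de_cma(max_evaluations=20000),
                     num_retries=32)
print(ret.x, ret.fun)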
@@ -245,48 +429,83 @@ class Cma_python(Optimizer):
                             popsize=self.popsize,
                             stop_fitness = self.stop_fitness,
                             rg=rg, runid=self.get_count_runs(store),
-
+                             normalize = self.normalize,
+                             update_gap = self.update_gap,
+                             workers = self.workers)
         return ret.x, ret.fun, ret.nfev
 
 class Cma_cpp(Optimizer):
     """CMA_ES C++ implementation."""
 
-    def __init__(self,
-
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None,
+                 workers: Optional[int] = None,
+                 update_gap: Optional[int] = None,
+                 normalize: Optional[bool] = True,
+                 delayed_update: Optional[bool] = True,
+                 stop_hist: Optional[int] = -1):
+
         Optimizer.__init__(self, max_evaluations, 'cma cpp')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
+        self.stop_hist = stop_hist
         self.guess = guess
         self.sdevs = sdevs
         self.update_gap = update_gap
+        self.delayed_update = delayed_update
+        self.normalize = normalize
         self.workers = workers
 
-    def minimize(self,
-
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike, Callable]] = 0.1,
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         ret = cmaescpp.minimize(fun, bounds,
                                 self.guess if not self.guess is None else guess,
-                                input_sigma= self.sdevs if not self.sdevs is None else sdevs,
-                                max_evaluations=self.max_eval_num(store),
-                                popsize=self.popsize,
-                                stop_fitness=self.stop_fitness,
-
-
-
+                                input_sigma = self.sdevs if not self.sdevs is None else sdevs,
+                                max_evaluations =self.max_eval_num(store),
+                                popsize = self.popsize,
+                                stop_fitness = self.stop_fitness,
+                                stop_hist = self.stop_hist,
+                                rg = rg, runid = self.get_count_runs(store),
+                                update_gap = self.update_gap,
+                                normalize = self.normalize,
+                                delayed_update = self.delayed_update,
+                                workers = self.workers)
         return ret.x, ret.fun, ret.nfev
 
 class Cma_orig(Optimizer):
     """CMA_ES original implementation."""
 
-    def __init__(self,
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[float] = None):
+
         Optimizer.__init__(self, max_evaluations, 'cma orig')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
         self.guess = guess
         self.sdevs = sdevs
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike]] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         lower = bounds.lb
         upper = bounds.ub
         guess = self.guess if not self.guess is None else guess
@@ -318,62 +537,285 @@ class Cma_orig(Optimizer):
             return es.result.xbest, es.result.fbest, evals
         except Exception as ex:
             print(ex)
-
+
+class Cma_lw(Optimizer):
+    """CMA lightweight Python implementation. See https://github.com/CyberAgentAILab/cmaes """
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[Union[float, ArrayLike]] = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'cma_lw')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike]] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
+        try:
+            import cmaes
+        except ImportError as e:
+            raise ImportError("Please install cmaes (pip install cmaes)")
+
+        if guess is None:
+            guess = self.guess
+        if guess is None:
+            guess = rg.uniform(bounds.lb, bounds.ub)
+        bds = np.array([t for t in zip(bounds.lb, bounds.ub)])
+        seed = int(rg.uniform(0, 2**32 - 1))
+        optimizer = cmaes.CMA(mean=guess, sigma=np.mean(sdevs), bounds=bds, seed=seed, population_size=self.popsize)
+        best_y = np.inf
+        evals = 0
+        fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
+        while evals < self.max_evaluations and not optimizer.should_stop():
+            xs = [optimizer.ask() for _ in range(optimizer.population_size)]
+            ys = fun(xs)
+            solutions = []
+            for i in range(optimizer.population_size):
+                x = xs[i]
+                y = ys[i]
+                solutions.append((x, y))
+                if y < best_y:
+                    best_y = y
+                    best_x = x
+            optimizer.tell(solutions)
+            evals += optimizer.population_size
+        if isinstance(fun, parallel):
+            fun.stop()
+        return best_x, best_y, evals
+
+class Cma_awm(Optimizer):
+    """CMA awm Python implementation. See https://github.com/CyberAgentAILab/cmaes """
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[Union[float, ArrayLike]] = None,
+                 continuous_space = None,
+                 discrete_space = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'cma_awm')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+        self.continuous_space = continuous_space
+        self.discrete_space = discrete_space
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike]] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+        try:
+            import cmaes
+        except ImportError as e:
+            raise ImportError("Please install cmaes (pip install cmaes)")
+
+        if guess is None:
+            guess = self.guess
+        if guess is None:
+            guess = rg.uniform(bounds.lb, bounds.ub)
+        seed = int(rg.uniform(0, 2**32 - 1))
+        optimizer = cmaes.CMAwM(mean=guess, sigma=np.mean(sdevs),
+                                continuous_space=self.continuous_space,
+                                discrete_space=self.discrete_space,
+                                seed=seed, population_size=self.popsize)
+        best_y = 1E99
+        evals = 0
+        fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
+        while evals < self.max_evaluations and not optimizer.should_stop():
+            asks = [optimizer.ask() for _ in range(optimizer.population_size)]
+            ys = fun([x[0] for x in asks])
+            solutions = []
+            for i in range(optimizer.population_size):
+                x = asks[i][1]
+                y = ys[i]
+                solutions.append((x, y))
+                if y < best_y:
+                    best_y = y
+                    best_x = x
+            optimizer.tell(solutions)
+            evals += optimizer.population_size
+        if isinstance(fun, parallel):
+            fun.stop()
+        return best_x, best_y, evals
+
+class Cma_sep(Optimizer):
+    """CMA sep Python implementation. See https://github.com/CyberAgentAILab/cmaes """
+
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 sdevs: Optional[Union[float, ArrayLike]] = None,
+                 workers: Optional[int] = None):
+
+        Optimizer.__init__(self, max_evaluations, 'cma_sep')
+        self.popsize = popsize
+        self.stop_fitness = stop_fitness
+        self.guess = guess
+        self.sdevs = sdevs
+        self.workers = workers
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[Union[float, ArrayLike]] = 0.3,
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+        try:
+            import cmaes
+        except ImportError as e:
+            raise ImportError("Please install cmaes (pip install cmaes)")
+
+        if guess is None:
+            guess = self.guess
+        if guess is None:
+            guess = rg.uniform(bounds.lb, bounds.ub)
+        bds = np.array([t for t in zip(bounds.lb, bounds.ub)])
+        seed = int(rg.uniform(0, 2**32 - 1))
+        optimizer = cmaes.SepCMA(mean=guess, sigma=np.mean(sdevs), bounds=bds, seed=seed, population_size=self.popsize)
+        best_y = np.inf
+        evals = 0
+        fun = serial(fun) if (self.workers is None or self.workers <= 1) else parallel(fun, self.workers)
+        while evals < self.max_evaluations and not optimizer.should_stop():
+            xs = [optimizer.ask() for _ in range(optimizer.population_size)]
+            ys = fun(xs)
+            solutions = []
+            for i in range(optimizer.population_size):
+                x = xs[i]
+                y = ys[i]
+                solutions.append((x, y))
+                if y < best_y:
+                    best_y = y
+                    best_x = x
+            optimizer.tell(solutions)
+            evals += optimizer.population_size
+        if isinstance(fun, parallel):
+            fun.stop()
+        return best_x, best_y, evals
+
 class De_cpp(Optimizer):
     """Differential Evolution C++ implementation."""
 
-    def __init__(self,
-
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = None,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 keep: Optional[int] = 200,
+                 f: Optional[float] = 0.5,
+                 cr: Optional[float] = 0.9,
+                 ints: Optional[ArrayLike] = None,
+                 workers: Optional[int] = None):
+
         Optimizer.__init__(self, max_evaluations, 'de cpp')
         self.popsize = popsize
+        self.guess = guess
         self.stop_fitness = stop_fitness
         self.keep = keep
         self.f = f
         self.cr = cr
+        self.ints = ints
+        self.workers = workers
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
+        if guess is None:
+            guess = self.guess
+
         ret = decpp.minimize(fun, None, bounds,
                              popsize=self.popsize,
                              max_evaluations = self.max_eval_num(store),
                              stop_fitness = self.stop_fitness,
-                             keep = self.keep, f = self.f, cr = self.cr,
-                             rg=rg, runid = self.get_count_runs(store)
+                             keep = self.keep, f = self.f, cr = self.cr, ints=self.ints,
+                             rg=rg, runid = self.get_count_runs(store),
+                             workers = self.workers, x0 = guess)
         return ret.x, ret.fun, ret.nfev
 
 class De_python(Optimizer):
     """Differential Evolution Python implementation."""
 
-    def __init__(self,
-
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 keep: Optional[int] = 200,
+                 f: Optional[float] = 0.5,
+                 cr: Optional[float] = 0.9,
+                 ints: Optional[ArrayLike] = None,
+                 workers: Optional[int] = None):
+
         Optimizer.__init__(self, max_evaluations, 'de py')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
         self.keep = keep
         self.f = f
         self.cr = cr
+        self.ints = ints
+        self.workers = workers
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         ret = de.minimize(fun, None,
-                          bounds, self.popsize, self.max_eval_num(store),
+                          bounds, self.popsize, self.max_eval_num(store),
                           stop_fitness = self.stop_fitness,
-                          keep = self.keep, f = self.f, cr = self.cr,
-                          rg=rg)
+                          keep = self.keep, f = self.f, cr = self.cr, ints=self.ints,
+                          rg=rg, workers = self.workers)
         return ret.x, ret.fun, ret.nfev
 
 class Cma_ask_tell(Optimizer):
     """CMA ask tell implementation."""
 
     def __init__(self, max_evaluations=50000,
-                 popsize = 31, guess=None, stop_fitness =
+                 popsize = 31, guess=None, stop_fitness = -np.inf, sdevs = None):
         Optimizer.__init__(self, max_evaluations, 'cma at')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
         self.guess = guess
         self.sdevs = sdevs
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         es = cmaes.Cmaes(bounds,
                          popsize = self.popsize,
                          input_sigma = self.sdevs if not self.sdevs is None else sdevs,
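The new Cma_lw, Cma_awm and Cma_sep wrappers above delegate to the external cmaes package through its ask/tell interface. Stripped of the fcmaes parallel-evaluation plumbing, the underlying loop is roughly the following sketch (requires pip install cmaes; the objective, bounds and budget are illustrative):

import numpy as np
from cmaes import CMA   # external package: pip install cmaes

def sphere(x):
    # illustrative objective
    return float(np.dot(x, x))

bounds = np.array([[-5.0, 5.0]] * 3)       # one (low, high) row per dimension
opt = CMA(mean=np.zeros(3), sigma=0.3, bounds=bounds, population_size=16)
best_x, best_y, evals = None, np.inf, 0
while evals < 5000 and not opt.should_stop():
    solutions = []
    for _ in range(opt.population_size):
        x = opt.ask()                      # sample a candidate
        y = sphere(x)
        solutions.append((x, y))
        if y < best_y:
            best_x, best_y = x, y
    opt.tell(solutions)                    # update the CMA distribution
    evals += opt.population_size
print(best_x, best_y, evals)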
@@ -392,9 +834,13 @@ class Cma_ask_tell(Optimizer):
 class De_ask_tell(Optimizer):
     """Differential Evolution ask tell implementation."""
 
-    def __init__(self,
-
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 keep: Optional[int] = 200,
+                 f: Optional[float] = 0.5,
+                 cr: Optional[float] = 0.9):
         Optimizer.__init__(self, max_evaluations, 'de at')
         self.popsize = popsize
         self.stop_fitness = stop_fitness
@@ -402,7 +848,14 @@ class De_ask_tell(Optimizer):
         self.f = f
         self.cr = cr
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         dim = len(bounds.lb)
         popsize = 31 if self.popsize is None else self.popsize
         es = de.DE(dim, bounds, popsize = popsize, rg = rg, keep = self.keep, F = self.f, Cr = self.cr)
@@ -422,7 +875,14 @@ class random_search(Optimizer):
     def __init__(self, max_evaluations=50000):
         Optimizer.__init__(self, max_evaluations, 'random')
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg: Optional[Generator] = Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         dim, x_min, y_min = len(bounds.lb), None, None
         max_chunk_size = 1 + 4e4 / dim
         evals = self.max_eval_num(store)
@@ -437,98 +897,29 @@ class random_search(Optimizer):
             budget -= chunk
         return x_min, y_min, evals
 
-class LDe_cpp(Optimizer):
-    """Local Differential Evolution C++ implementation."""
-
-    def __init__(self, max_evaluations=50000,
-                 popsize = None, stop_fitness = None,
-                 keep = 200, f = 0.5, cr = 0.9, guess = None, sdevs = None):
-        Optimizer.__init__(self, max_evaluations, 'lde cpp')
-        self.popsize = popsize
-        self.stop_fitness = stop_fitness
-        self.keep = keep
-        self.f = f
-        self.cr = cr
-        self.guess = guess
-        self.sdevs = sdevs
-
-    def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()), store=None):
-        ret = ldecpp.minimize(fun, bounds,
-                              self.guess if not self.guess is None else guess,
-                              self.sdevs if not self.sdevs is None else sdevs,
-                              popsize=self.popsize,
-                              max_evaluations = self.max_eval_num(store),
-                              stop_fitness = self.stop_fitness,
-                              keep = self.keep, f = self.f, cr = self.cr,
-                              rg=rg, runid = self.get_count_runs(store))
-        return ret.x, ret.fun, ret.nfev
-
-class GCLDE_cpp(Optimizer):
-    """GCL-Differential Evolution C++ implementation."""
-
-    def __init__(self, max_evaluations=50000,
-                 popsize = None, stop_fitness = None,
-                 pbest = 0.7, f0 = 0.0, cr0 = 0.0, workers = None):
-        Optimizer.__init__(self, max_evaluations, 'gclde cpp')
-        self.popsize = popsize
-        self.stop_fitness = stop_fitness
-        self.pbest = pbest
-        self.f0 = f0
-        self.cr0 = cr0
-        self.workers = workers
-
-    def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()),
-                 store=None, workers = None):
-        ret = gcldecpp.minimize(fun, None, bounds,
-                                popsize=self.popsize,
-                                max_evaluations = self.max_eval_num(store),
-                                stop_fitness = self.stop_fitness,
-                                pbest = self.pbest, f0 = self.f0, cr0 = self.cr0,
-                                rg=rg, runid = self.get_count_runs(store),
-                                workers = self.workers if workers is None else workers)
-        return ret.x, ret.fun, ret.nfev
-
-class LCLDE_cpp(Optimizer):
-    """LCL-Differential Evolution C++ implementation."""
-
-    def __init__(self, max_evaluations=50000,
-                 popsize = None, stop_fitness = None,
-                 pbest = 0.7, f0 = 0.0, cr0 = 0.0, workers = None, guess = None, sdevs = None):
-        Optimizer.__init__(self, max_evaluations, 'lclde cpp')
-        self.popsize = popsize
-        self.stop_fitness = stop_fitness
-        self.pbest = pbest
-        self.f0 = f0
-        self.cr0 = cr0
-        self.workers = workers
-        self.guess = guess
-        self.sdevs = sdevs
-
-    def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()),
-                 store=None, workers = None):
-        ret = lcldecpp.minimize(fun, bounds,
-                                self.guess if not self.guess is None else guess,
-                                self.sdevs if not self.sdevs is None else sdevs,
-                                popsize=self.popsize,
-                                max_evaluations = self.max_eval_num(store),
-                                stop_fitness = self.stop_fitness,
-                                pbest = self.pbest, f0 = self.f0, cr0 = self.cr0,
-                                rg=rg, runid = self.get_count_runs(store),
-                                workers = self.workers if workers is None else workers)
-
-        return ret.x, ret.fun, ret.nfev
 
 class Da_cpp(Optimizer):
     """Dual Annealing C++ implementation."""
 
-    def __init__(self,
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 stop_fitness: Optional[float] = -np.inf,
+                 use_local_search: Optional[bool] = True,
+                 guess: Optional[ArrayLike] = None):
+
         Optimizer.__init__(self, max_evaluations, 'da cpp',)
         self.stop_fitness = stop_fitness
         self.use_local_search = use_local_search
         self.guess = guess
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         ret = dacpp.minimize(fun, bounds,
                              self.guess if guess is None else guess,
                              max_evaluations = self.max_eval_num(store),
@@ -536,60 +927,57 @@ class Da_cpp(Optimizer):
                             rg=rg, runid = self.get_count_runs(store))
         return ret.x, ret.fun, ret.nfev
 
-class Csma_cpp(Optimizer):
-    """SCMA C++ implementation."""
-
-    def __init__(self, max_evaluations=50000,
-                 popsize = None, guess=None, stop_fitness = None, workers = None, sdevs = None):
-        Optimizer.__init__(self, max_evaluations, 'scma cpp')
-        self.popsize = popsize
-        self.stop_fitness = stop_fitness
-        self.guess = guess
-        self.workers = workers
-        self.sdevs = sdevs
-
-    def minimize(self, fun, bounds, guess=None, sdevs=0.16, rg=Generator(MT19937()),
-                 store=None, workers = None):
-        ret = csmacpp.minimize(fun, bounds,
-                               self.guess if guess is None else guess,
-                               self.sdevs if not self.sdevs is None else sdevs,
-                               max_evaluations = self.max_eval_num(store),
-                               stop_fitness = self.stop_fitness,
-                               rg=rg, runid = self.get_count_runs(store))
-        return ret.x, ret.fun, ret.nfev
-
 class Bite_cpp(Optimizer):
     """Bite C++ implementation."""
 
-    def __init__(self,
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 guess: Optional[ArrayLike] = None,
+                 stop_fitness: Optional[float] = -np.inf,
+                 M: Optional[int] = None,
+                 popsize: Optional[int] = None,
+                 stall_criterion: Optional[int] = None):
+
         Optimizer.__init__(self, max_evaluations, 'bite cpp')
-        self.popsize = popsize
-        self.stop_fitness = stop_fitness
         self.guess = guess
-        self.
-        self.
+        self.stop_fitness = stop_fitness
+        self.M = 1 if M is None else M
+        self.popsize = 0 if popsize is None else popsize
+        self.stall_criterion = 0 if stall_criterion is None else stall_criterion
+
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
 
-    def minimize(self, fun, bounds, guess=None, sdevs=None, rg=Generator(MT19937()),
-                 store=None, workers = None):
-
         ret = bitecpp.minimize(fun, bounds,
                                self.guess if guess is None else guess,
                                max_evaluations = self.max_eval_num(store),
-                               popsize=self.popsize,
-
+                               stop_fitness = self.stop_fitness, M = self.M, popsize = self.popsize,
+                               stall_criterion = self.stall_criterion,
                                rg=rg, runid = self.get_count_runs(store))
         return ret.x, ret.fun, ret.nfev
 
 class Dual_annealing(Optimizer):
     """scipy dual_annealing."""
 
-    def __init__(self,
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 use_local_search: Optional[bool] = True):
+
         Optimizer.__init__(self, max_evaluations, 'scipy da')
         self.no_local_search = not use_local_search
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
         ret = dual_annealing(fun, bounds=list(zip(bounds.lb, bounds.ub)),
                              maxfun = self.max_eval_num(store),
                              no_local_search = self.no_local_search,
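Bite_cpp now forwards M, popsize and stall_criterion to bitecpp.minimize, mapping unset values to 1, 0 and 0 respectively. A direct call might look like the following sketch; the objective, the budget and the choice M=6 are illustrative assumptions, not recommendations from the package:

import numpy as np
from scipy.optimize import Bounds
from fcmaes.optimizer import Bite_cpp

def rastrigin(x):
    # illustrative multimodal test objective
    x = np.asarray(x)
    return float(10*len(x) + np.sum(x*x - 10*np.cos(2*np.pi*x)))

opt = Bite_cpp(max_evaluations=20000, M=6)   # M: BiteOpt depth-style setting (assumption)
x, y, evals = opt.minimize(rastrigin, Bounds([-5.12]*5, [5.12]*5))
print(y, evals)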
@@ -600,12 +988,21 @@ class Dual_annealing(Optimizer):
 class Differential_evolution(Optimizer):
     """scipy differential_evolution."""
 
-    def __init__(self,
-
+    def __init__(self,
+                 max_evaluations: Optional[int] = 50000,
+                 popsize: Optional[int] = 31):
+
         Optimizer.__init__(self, max_evaluations, 'scipy de')
         self.popsize = popsize
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         popsize = self.popsize
         maxiter = int(self.max_eval_num(store) / (popsize * len(bounds.lb)) - 1)
         ret = differential_evolution(fun, bounds=bounds, maxiter=maxiter,
@@ -614,7 +1011,7 @@ class Differential_evolution(Optimizer):
 
 class CheckBounds(object):
 
-    def __init__(self, bounds):
+    def __init__(self, bounds: Bounds):
         self.bounds = bounds
 
     def __call__(self, **kwargs):
@@ -629,7 +1026,13 @@ class Basin_hopping(Optimizer):
     def __init__(self, max_evaluations=50000, store=None):
         Optimizer.__init__(self, max_evaluations, 'scipy basin hopping')
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
         localevals = 200
         maxiter = int(self.max_eval_num(store) / localevals)
         if guess is None:
@@ -648,7 +1051,14 @@ class Minimize(Optimizer):
     def __init__(self, max_evaluations=50000, store=None):
         Optimizer.__init__(self, max_evaluations, 'scipy minimize')
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         if guess is None:
             guess = rg.uniform(bounds.lb, bounds.ub)
         ret = minimize(fun, x0=guess, bounds=bounds)
@@ -660,7 +1070,14 @@ class Shgo(Optimizer):
     def __init__(self, max_evaluations=50000, store=None):
         Optimizer.__init__(self, max_evaluations, 'scipy shgo')
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         ret = shgo(fun, bounds=list(zip(bounds.lb, bounds.ub)),
                    options={'maxfev': self.max_eval_num(store)})
         return ret.x, ret.fun, ret.nfev
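Every wrapper in this file, from Crfmnes_cpp down to the scipy and NLopt adapters, now exposes the same typed minimize(fun, bounds, guess, sdevs, rg, store) signature returning (x, fun, nfev), so they can be swapped without touching calling code. A sketch with the new CR-FM-NES C++ wrapper, where the problem, budget and sdevs value are illustrative:

import numpy as np
from scipy.optimize import Bounds
from fcmaes.optimizer import Crfmnes_cpp

def ackley(x):
    # illustrative multimodal test objective
    x = np.asarray(x)
    n = len(x)
    return float(-20*np.exp(-0.2*np.sqrt(np.sum(x*x)/n))
                 - np.exp(np.sum(np.cos(2*np.pi*x))/n) + 20 + np.e)

opt = Crfmnes_cpp(max_evaluations=20000, popsize=32)
x, y, evals = opt.minimize(ackley, Bounds([-5.0]*8, [5.0]*8), sdevs=0.3)
print(y, evals)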
@@ -688,7 +1105,14 @@ class NLopt(Optimizer):
         Optimizer.__init__(self, max_evaluations, 'NLopt ' + algo.get_algorithm_name())
         self.algo = algo
 
-    def minimize(self,
+    def minimize(self,
+                 fun: Callable[[ArrayLike], float],
+                 bounds: Optional[Bounds],
+                 guess: Optional[ArrayLike] = None,
+                 sdevs: Optional[float] = None, # ignored
+                 rg=Generator(PCG64DXSM()),
+                 store = None) -> Tuple[np.ndarray, float, int]:
+
         self.fun = fun
         opt = self.algo
         opt.set_min_objective(self.nlfunc)