fcmaes 1.1.3__py3-none-any.whl → 1.6.9__py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- fcmaes/__init__.py +12 -2
- fcmaes/advretry.py +217 -159
- fcmaes/astro.py +143 -27
- fcmaes/bitecpp.py +107 -0
- fcmaes/cmaes.py +204 -173
- fcmaes/cmaescpp.py +253 -87
- fcmaes/crfmnes.py +339 -0
- fcmaes/crfmnescpp.py +273 -0
- fcmaes/dacpp.py +39 -51
- fcmaes/de.py +472 -0
- fcmaes/decpp.py +222 -64
- fcmaes/diversifier.py +357 -0
- fcmaes/evaluator.py +297 -14
- fcmaes/lib/libacmalib.dll +0 -0
- fcmaes/lib/libacmalib.dylib +0 -0
- fcmaes/lib/libacmalib.so +0 -0
- fcmaes/lib/libhbv.so +0 -0
- fcmaes/lib/liblrgv.so +0 -0
- fcmaes/lib/librw_top_trumps.dll +0 -0
- fcmaes/lib/librw_top_trumps.so +0 -0
- fcmaes/mapelites.py +737 -0
- fcmaes/mode.py +719 -0
- fcmaes/modecpp.py +470 -0
- fcmaes/moretry.py +270 -0
- fcmaes/multiretry.py +195 -0
- fcmaes/optimizer.py +883 -112
- fcmaes/pgpecpp.py +340 -0
- fcmaes/pygmoretry.py +10 -19
- fcmaes/retry.py +248 -121
- fcmaes/test_cma.py +207 -30
- fcmaes/testfun.py +38 -1
- {fcmaes-1.1.3.dist-info → fcmaes-1.6.9.dist-info}/METADATA +22 -12
- fcmaes-1.6.9.dist-info/RECORD +36 -0
- {fcmaes-1.1.3.dist-info → fcmaes-1.6.9.dist-info}/WHEEL +1 -1
- fcmaes/hhcpp.py +0 -114
- fcmaes/lib/libgtoplib.dll +0 -0
- fcmaes/lib/libgtoplib.so +0 -0
- fcmaes-1.1.3.dist-info/RECORD +0 -23
- {fcmaes-1.1.3.dist-info → fcmaes-1.6.9.dist-info}/LICENSE +0 -0
- {fcmaes-1.1.3.dist-info → fcmaes-1.6.9.dist-info}/top_level.txt +0 -0
fcmaes/__init__.py
CHANGED
@@ -3,16 +3,26 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory.
 
-__version__ = '1.1.3'
+__version__ = '1.6.9'
 
 __all__ = [
     'cmaes',
     'cmaescpp',
+    'crmfnes',
+    'crfmnescpp',
+    'de',
     'dacpp',
     'decpp',
-    '…
+    'diversifier',
+    'bitecpp',
+    'csmacpp',
     'retry',
     'advretry',
+    'mapelites',
+    'multiretry',
+    'mode',
+    'modecpp',
+    'moretry',
     'pygmoretry',
     'optimizer',
     'astro',
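The new entries in ``__all__`` above mirror the submodules added in this release (crfmnes, de, mode, mapelites, diversifier and friends). As a quick illustrative check, assuming nothing beyond what the diff shows, the installed wheel could be inspected like this:

import fcmaes

print(fcmaes.__version__)              # '1.6.9' for this wheel
print('mapelites' in fcmaes.__all__)   # True: one of the new submodules listed above
from fcmaes import de, retry           # submodules added / changed in 1.6.9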
fcmaes/advretry.py
CHANGED
@@ -3,52 +3,58 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory.
 
+
+from __future__ import annotations
+
 import time
 import os
-import sys
 import math
-import …
+import threadpoolctl
+import _pickle as cPickle
+import bz2
 import ctypes as ct
 import numpy as np
 from numpy.linalg import norm
-…
+import random
 import multiprocessing as mp
 from multiprocessing import Process
-from numpy.random import Generator, …
+from numpy.random import Generator, PCG64DXSM, SeedSequence
 from scipy.optimize import OptimizeResult, Bounds
+from loguru import logger
+from fcmaes.retry import _convertBounds, plot, Shared2d
+from fcmaes.optimizer import Optimizer, dtime, fitting, de_cma
 
-from …
-from …
+from typing import Optional, Callable, List, Tuple
+from numpy.typing import ArrayLike
 
 os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
 os.environ['MKL_NUM_THREADS'] = '1'
 os.environ['OPENBLAS_NUM_THREADS'] = '1'
 
-def minimize(fun,
-             bounds,
-             value_limit = …
-             num_retries = 5000,
-             …
+def minimize(fun: Callable[[ArrayLike], float],
+             bounds: Bounds,
+             value_limit: Optional[float] = np.inf,
+             num_retries: Optional[int] = 5000,
+             workers: Optional[int] = mp.cpu_count(),
+             popsize: Optional[int] = 31,
+             min_evaluations: Optional[int] = 1500,
+             max_eval_fac: Optional[int] = None,
+             check_interval: Optional[int] = 100,
+             capacity: Optional[int] = 500,
+             stop_fitness: Optional[float] = -np.inf,
+             optimizer: Optional[Optimizer] = None,
+             statistic_num: Optional[int] = 0,
+             datafile: Optional[str] = None
+             ) -> OptimizeResult:
     """Minimization of a scalar function of one or more variables using
-    …
+    smart parallel optimization retry.
 
     Parameters
     ----------
     fun : callable
         The objective function to be minimized.
-            ``fun(x…
-        where ``x`` is an 1-D array with shape (n,)
-        is a tuple of the fixed parameters needed to completely
-        specify the function.
+            ``fun(x) -> float``
+        where ``x`` is an 1-D array with shape (n,)
     bounds : sequence or `Bounds`, optional
         Bounds on variables. There are two ways to specify the bounds:
             1. Instance of the `scipy.Bounds` class.
@@ -62,10 +68,6 @@ def minimize(fun,
         cause the algorithm to get stuck at local minima.
     num_retries : int, optional
         Number of optimization retries.
-    logger : logger, optional
-        logger for log output of the retry mechanism. If None, logging
-        is switched off. Default is a logger which logs both to stdout and
-        appends to a file ``optimizer.log``.
     workers : int, optional
         number of parallel processes used. Default is mp.cpu_count()
     popsize = int, optional
@@ -82,12 +84,14 @@ def minimize(fun,
         is incremented by ``evals_step_size``
     capacity : int, optional
         capacity of the evaluation store. Higher value means broader search.
-    …
+    stop_fitness : float, optional
         Limit for fitness value. optimization runs terminate if this value is reached.
     optimizer : optimizer.Optimizer, optional
         optimizer to use. Default is a sequence of differential evolution and CMA-ES.
         Since advanced retry sets the initial step size it works best if CMA-ES is
         used / in the sequence of optimizers.
+    datafile, optional
+        file to persist / retrieve the internal state of the optimizations.
 
     Returns
     -------
@@ -98,40 +102,78 @@ def minimize(fun,
     ``success`` a Boolean flag indicating if the optimizer exited successfully. """
 
     if optimizer is None:
-        optimizer = de_cma(min_evaluations, popsize, …
+        optimizer = de_cma(min_evaluations, popsize, stop_fitness)
     if max_eval_fac is None:
         max_eval_fac = int(min(50, 1 + num_retries // check_interval))
-    store = Store(bounds, max_eval_fac, check_interval, capacity, …
-        …
+    store = Store(fun, bounds, max_eval_fac, check_interval, capacity, num_retries,
+                  statistic_num, datafile)
+    if not datafile is None:
+        try:
+            store.load(datafile)
+        except:
+            pass
+    return retry(store, optimizer.minimize, value_limit, workers, stop_fitness)
 
-def retry(…
+def retry(store: Store,
+          optimize: Callable,
+          value_limit: Optional[float] = np.inf,
+          workers=mp.cpu_count(),
+          stop_fitness = -np.inf) -> OptimizeResult:
+
     sg = SeedSequence()
-    rgs = [Generator(…
+    rgs = [Generator(PCG64DXSM(s)) for s in sg.spawn(workers)]
     proc=[Process(target=_retry_loop,
-            args=(pid, rgs, …
+            args=(pid, rgs, store, optimize, value_limit, stop_fitness)) for pid in range(workers)]
     [p.start() for p in proc]
     [p.join() for p in proc]
     store.sort()
     store.dump()
     return OptimizeResult(x=store.get_x_best(), fun=store.get_y_best(),
                           nfev=store.get_count_evals(), success=True)
+
+def minimize_plot(name: str,
+                  optimizer: Optimizer,
+                  fun: Callable[[ArrayLike], float],
+                  bounds: Bounds,
+                  value_limit: Optional[float] = np.inf,
+                  plot_limit: Optional[float] = np.inf,
+                  num_retries: Optional[int] = 1024,
+                  workers: Optional[int] = mp.cpu_count(),
+                  stop_fitness: Optional[float] = -np.inf,
+                  statistic_num: Optional[int] = 5000) -> OptimizeResult:
+
+    time0 = time.perf_counter() # optimization start time
+    name += '_' + optimizer.name
+    logger.info('optimize ' + name)
+    store = Store(fun, bounds, capacity = 500, statistic_num = statistic_num,
+                  num_retries=num_retries)
+    ret = retry(store, optimizer.minimize, value_limit, workers, stop_fitness)
+    impr = store.get_improvements()
+    np.savez_compressed(name, ys=impr)
+    filtered = np.array([imp for imp in impr if imp[1] < plot_limit])
+    if len(filtered) > 0: impr = filtered
+    logger.info(name + ' time ' + str(dtime(time0)))
+    plot(impr, 'progress_aret.' + name + '.png', label = name,
+         xlabel = 'time in sec', ylabel = r'$f$')
+    return ret
 
 class Store(object):
     """thread safe storage for optimization retry results;
     delivers boundary and initial step size vectors for advanced retry crossover operation."""
 
     def __init__(self,
-                 …
-                 num_retries = None
+                 fun: Callable[[ArrayLike], float], # fitness function
+                 bounds: Bounds, # bounds of the objective function arguments
+                 max_eval_fac: Optional[int] = None, # maximal number of evaluations factor
+                 check_interval: Optional[int] = 100, # sort evaluation store after check_interval iterations
+                 capacity: Optional[int] = 500, # capacity of the evaluation store
+                 num_retries: Optional[int] = None,
+                 statistic_num: Optional[int] = 0,
+                 datafile: Optional[str] = None
                  ):
-        …
+        self.fun = fun
         self.lower, self.upper = _convertBounds(bounds)
-        self.delta = self.upper - self.lower
-        self.logger = logger
+        self.delta = self.upper - self.lower
         self.capacity = capacity
         if max_eval_fac is None:
             if num_retries is None:
@@ -140,48 +182,90 @@ class Store(object):
                 max_eval_fac = int(min(50, 1 + num_retries // check_interval))
         if num_retries == None:
             num_retries = max_eval_fac * check_interval
+        self.num_retries = num_retries
         # increment eval_fac so that max_eval_fac is reached at last retry
         self.eval_fac_incr = max_eval_fac / (num_retries/check_interval)
         self.max_eval_fac = max_eval_fac
         self.check_interval = check_interval
         self.dim = len(self.lower)
-        self.random = Random()
         self.t0 = time.perf_counter()
+        self.statistic_num = statistic_num
+        self.datafile = datafile
+        self.rg = random.Random()
+        #self.rg = Generator(PCG64DXSM()))
+        #self.rg = Generator(PCG64DXSM(random.randint(0, 2**63 - 1)))
 
         #shared between processes
         self.add_mutex = mp.Lock()
         self.check_mutex = mp.Lock()
         self.xs = mp.RawArray(ct.c_double, capacity * self.dim)
-        self.lowers = mp.RawArray(ct.c_double, capacity * self.dim)
-        self.uppers = mp.RawArray(ct.c_double, capacity * self.dim)
         self.ys = mp.RawArray(ct.c_double, capacity)
         self.eval_fac = mp.RawValue(ct.c_double, 1)
         self.count_evals = mp.RawValue(ct.c_long, 0)
         self.count_runs = mp.RawValue(ct.c_int, 0)
-        self.num_stored = mp.RawValue(ct.c_int, 0)
-        self.…
-        self.…
-        self.worst_y = mp.RawValue(ct.c_double, math.inf)
+        self.num_stored = mp.RawValue(ct.c_int, 0)
+        self.best_y = mp.RawValue(ct.c_double, np.inf)
+        self.worst_y = mp.RawValue(ct.c_double, np.inf)
         self.best_x = mp.RawArray(ct.c_double, self.dim)
-        …
+
+        if statistic_num > 0:  # enable statistics
+            self.statistic_num = statistic_num
+            self.time = mp.RawArray(ct.c_double, self.statistic_num)
+            self.val = mp.RawArray(ct.c_double, self.statistic_num)
+            self.si = mp.RawValue(ct.c_int, 0)
+            self.sevals = mp.RawValue(ct.c_long, 0)
+            self.bval = mp.RawValue(ct.c_double, np.inf)
+
+    # register improvement - time and value
+    def wrapper(self, x: ArrayLike) -> float:
+        y = self.fun(x)
+        self.sevals.value += 1
+        if y < self.bval.value:
+            self.bval.value = y
+            si = self.si.value
+            if si < self.statistic_num - 1:
+                self.si.value = si + 1
+            self.time[si] = dtime(self.t0)
+            self.val[si] = y
+            logger.info(str(self.time[si]) + ' ' +
+                        str(self.sevals.value) + ' ' +
+                        str(y) + ' ' +
+                        str(list(x)))
+        return y
+
+    # persist store
+    def save(self, name: str):
+        with bz2.BZ2File(name + '.pbz2', 'w') as f:
+            cPickle.dump(self.get_data(), f)
 
-    …
+    def load(self, name: str):
+        data = cPickle.load(bz2.BZ2File(name + '.pbz2', 'rb'))
+        self.set_data(data)
+
+    def get_data(self) -> List:
+        data = []
+        data.append(self.get_xs())
+        data.append(self.get_ys())
+        data.append(self.get_x_best())
+        data.append(self.get_y_best())
+        data.append(self.num_stored.value)
+        return data
 
-    def …
+    def set_data(self, data: ArrayLike):
+        xs = data[0]
+        ys = data[1]
+        for i in range(len(ys)):
+            self.replace(i, ys[i], xs[i])
+        self.best_x[:] = data[2][:]
+        self.best_y.value = data[3]
+        self.num_stored.value = data[4]
+        self.sort()
+
+    def get_improvements(self) -> np.ndarray:
+        return np.array(list(zip(self.time[:self.si.value], self.val[:self.si.value])))
 
     # get num best values at evenly distributed times
-    def get_statistics(self, num):
+    def get_statistics(self, num: int) -> List:
         ts = self.time[:self.si.value]
         vs = self.val[:self.si.value]
         mt = ts[-1]
@@ -196,58 +280,56 @@ class Store(object):
             stats.append(val)
         return stats
 
-    def eval_num(self, max_evals):
+    def eval_num(self, max_evals: int) -> int:
         return int(self.eval_fac.value * max_evals)
 
-    def limits(self):
+    def limits(self) -> Tuple[float, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
         """guess, boundaries and initial step size for crossover operation."""
-        diff_fac = self.…
-        lim_fac = self.…
+        diff_fac = self.rg.uniform(0.5, 1.0)
+        lim_fac = self.rg.uniform(2.0, 4.0) * diff_fac
         with self.add_mutex:
             i, j = self.crossover()
             if i < 0:
                 return math.inf, None, None, None, None
             x0 = np.asarray(self.get_x(i))
             x1 = np.asarray(self.get_x(j))
-            y0 = …
+            y0 = self.get_y(i)
 
         deltax = np.abs(x1 - x0)
         delta_bound = np.maximum(0.0001, lim_fac * deltax)
         lower = np.maximum(self.lower, x0 - delta_bound)
         upper = np.minimum(self.upper, x0 + delta_bound)
-        sdev = np.…
+        sdev = np.clip(diff_fac * deltax / self.delta, 0.001, 0.5)
         return y0, x1, lower, upper, sdev
 
-    def distance(self, xprev, x):
+    def distance(self, xprev: np.ndarray, x: np.ndarray) -> float:
         """distance between entries in store."""
         return norm((x - xprev) / self.delta) / math.sqrt(self.dim)
 
-    def replace(self, i, y…
+    def replace(self, i: int, y: float, x: np.ndarray):
         """replace entry in store."""
         self.set_y(i, y)
-        self.set_x(i, …
-        …
-    def crossover(self): # Choose two good entries for recombination
+        self.set_x(i, x)
+
+    def crossover(self) -> Tuple[int,int]: # Choose two good entries for recombination
         """indices of store entries to be used for crossover operation."""
-        n = self.…
+        n = self.num_stored.value
         if n < 2:
             return -1, -1
-        lim = self.…
+        lim = self.rg.uniform(min(0.1*n, 1), 0.2*n)/n
         for _ in range(100):
             i1 = -1
             i2 = -1
             for j in range(n):
-                if self.…
+                if self.rg.random() < lim:
                     if i1 < 0:
                         i1 = j
                     else:
                         i2 = j
                         return i1, i2
         return -1, -1
-
-    def sort(self):
+
+    def sort(self) -> int:
         """sorts all store entries, keep only the 90% best to make room for new ones;
         skip entries having similar x values than their neighbors to preserve diversity"""
         ns = self.num_stored.value
@@ -256,138 +338,114 @@ class Store(object):
 
         ys = np.asarray(self.ys[:ns])
         yi = ys.argsort()
-        sortRuns = []
 
-        …
+        ys2 = []
+        xs2 = []
         for i in range(ns):
             y = ys[yi[i]]
-            x = np.asarray(self.get_x(yi[i]))
-            if (…
-            …
-            xprev2 = xprev
-            xprev = x
+            x = np.asarray(self.get_x(yi[i])) # preserve diversity
+            if np.all([self.distance(xp, x) > 0.15 for xp in xs2[-2:]]):
+                ys2.append(y)
+                xs2.append(x)
 
-        …
-        for i in range(…
-            self.replace(i, …
-            self.…
-            self.…
-        …
-        return numStored
+        ns = min(len(ys2), int(0.9*self.capacity)) # keep 90% best
+        for i in range(ns):
+            self.replace(i, ys2[i], xs2[i])
+        self.num_stored.value = ns
+        self.worst_y.value = self.get_y(ns-1)
+        return ns
 
-    def add_result(self, y…
+    def add_result(self, y: float, x: np.ndarray, evals: int, limit: Optional[float] = np.inf):
         """registers an optimization result at the store."""
         with self.add_mutex:
-            self.…
+            self.count_evals.value += evals
             if y < limit:
                 if y < self.best_y.value:
                     self.best_y.value = y
-                    self.best_x[:] = …
-                    self.add_statistics()
+                    self.best_x[:] = x[:]
                     self.dump()
+                    if not self.datafile is None:
+                        self.save(self.datafile)
+
                 if self.num_stored.value >= self.capacity - 1:
                     self.sort()
                 ns = self.num_stored.value
+                self.replace(ns, y, x)
                 self.num_stored.value = ns + 1
-                self.replace(ns, y, xs, lower, upper)
 
-    def …
-        return self.…
+    def get_x_best(self) -> np.ndarray:
+        return np.array(self.best_x[:])
 
-    def …
-        return [self.…
+    def get_x(self, pid) -> np.ndarray:
+        return self.xs[pid*self.dim:(pid+1)*self.dim]
 
-    def …
-        return self.…
+    def get_xs(self) -> np.ndarray:
+        return np.array([self.get_x(i) for i in range(self.num_stored.value)])
 
-    def get_y(self, pid):
+    def get_y(self, pid: int) -> float:
         return self.ys[pid]
 
-    def get_ys(self):
-        return self.ys[:self.num_stored.value]
+    def get_ys(self) -> np.ndarray:
+        return np.array(self.ys[:self.num_stored.value])
 
-    def get_y_best(self):
+    def get_y_best(self) -> float:
         return self.best_y.value
 
-    def …
-        return self.lowers[pid*self.dim:(pid+1)*self.dim]
-
-    def get_upper(self, pid):
-        return self.uppers[pid*self.dim:(pid+1)*self.dim]
-
-    def get_count_evals(self):
+    def get_count_evals(self) -> int:
         return self.count_evals.value
 
-    def get_count_runs(self):
+    def get_count_runs(self) -> int:
         return self.count_runs.value
 
     def set_x(self, pid, xs):
         self.xs[pid*self.dim:(pid+1)*self.dim] = xs[:]
 
     def set_y(self, pid, y):
-        self.ys[pid] = y
+        self.ys[pid] = y
 
-    def …
-        …
-
-    def set_upper(self, pid, upper):
-        self.uppers[pid*self.dim:(pid+1)*self.dim] = upper[:]
-
-    def get_runs_compare_incr(self, limit):
+    def get_runs_compare_incr(self, limit: float) -> bool:
+        """trigger sorting after check_interval calls. """
         with self.add_mutex:
             if self.count_runs.value < limit:
                 self.count_runs.value += 1
+                if self.count_runs.value % self.check_interval == self.check_interval-1:
+                    if self.eval_fac.value < self.max_eval_fac:
+                        self.eval_fac.value += self.eval_fac_incr
+                    self.sort()
                 return True
             else:
                 return False
 
-    def incr_count_evals(self, evals):
-        """registers the number of evaluations of an optimization run;
-        trigger sorting after check_interval calls. """
-        if self.count_runs.value % self.check_interval == self.check_interval-1:
-            if self.eval_fac.value < self.max_eval_fac:
-                self.eval_fac.value += self.eval_fac_incr
-            #print(self.eval_fac.value)
-            self.sort()
-        self.count_evals.value += evals
-
     def dump(self):
         """logs the current status of the store if logger defined."""
-        if self.logger is None:
-            return
         Ys = self.get_ys()
         vals = []
         for i in range(min(20, len(Ys))):
             vals.append(round(Ys[i],2))
-        dt = dtime(self.t0)
+        dt = dtime(self.t0)+.000001
         message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6} {7} {8!s} {9!s}'.format(
             dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value,
             self.best_y.value, self.worst_y.value, self.num_stored.value, int(self.eval_fac.value),
             vals, self.best_x[:])
-        …
+        logger.info(message)
 
-def _retry_loop(pid, rgs, …
-    …
-    #…
-    store.logger = logger()
-
-    while store.get_runs_compare_incr(num_retries):
+def _retry_loop(pid, rgs, store, optimize, value_limit, stop_fitness = -np.inf):
+    fun = store.wrapper if store.statistic_num > 0 else store.fun
+    #with threadpoolctl.threadpool_limits(limits=1, user_api="blas"):
+    while store.get_runs_compare_incr(store.num_retries) and store.best_y.value > stop_fitness:
         if _crossover(fun, store, optimize, rgs[pid]):
             continue
         try:
+            rg = rgs[pid]
             dim = len(store.lower)
-            sol, y, evals = optimize(fun, Bounds(store.lower, store.upper), None,
-                                     […
-            store.add_result(y, sol, …
+            sol, y, evals = optimize(fun, Bounds(store.lower, store.upper), None,
+                                     [rg.uniform(0.05, 0.1)]*dim, rg, store)
+            store.add_result(y, sol, evals, value_limit)
         except Exception as ex:
             continue
-#        if pid == 0:
-#            store.dump()
 
 def _crossover(fun, store, optimize, rg):
-    if …
+    if rg.uniform(0,1) < 0.5:
         return False
     y0, guess, lower, upper, sdev = store.limits()
     if guess is None:
@@ -395,7 +453,7 @@ def _crossover(fun, store, optimize, rg):
     guess = fitting(guess, lower, upper) # take X from lower
     try:
         sol, y, evals = optimize(fun, Bounds(lower, upper), guess, sdev, rg, store)
-        store.add_result(y, sol, …
+        store.add_result(y, sol, evals, y0) # limit to y0
     except:
        return False
     return True