fcmaes 1.3.17__py3-none-any.whl → 1.6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fcmaes/retry.py CHANGED
@@ -2,40 +2,43 @@
  #
  # This source code is licensed under the MIT license found in the
  # LICENSE file in the root directory.
+ from __future__ import annotations

  import time
  import math
  import os
  import sys
+ import threadpoolctl
  import ctypes as ct
  from scipy import interpolate
  import numpy as np
- from numpy.random import Generator, MT19937, SeedSequence
+ from numpy.random import Generator, PCG64DXSM, SeedSequence
  from scipy.optimize._constraints import new_bounds_to_old
  from scipy.optimize import OptimizeResult, Bounds
  import multiprocessing as mp
  from multiprocessing import Process
- from fcmaes.optimizer import de_cma, dtime, logger
-
+ from fcmaes.optimizer import de_cma, dtime, Optimizer
+ from fcmaes.evaluator import is_debug_active, is_trace_active
+ from loguru import logger
+ from typing import Optional, Callable, List
+ from numpy.typing import ArrayLike

  os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
  os.environ['MKL_NUM_THREADS'] = '1'
  os.environ['OPENBLAS_NUM_THREADS'] = '1'

- def minimize(fun,
- bounds = None,
- value_limit = math.inf,
- num_retries = 1024,
- logger = None,
- workers = mp.cpu_count(),
- popsize = 31,
- max_evaluations = 50000,
- capacity = 500,
- stop_fitness = -math.inf,
- optimizer = None,
- statistic_num = 0,
- plot_name = None
- ):
+ def minimize(fun: Callable[[ArrayLike], float],
+ bounds: Bounds,
+ value_limit: Optional[float] = np.inf,
+ num_retries: Optional[int] = 1024,
+ workers: Optional[int] = mp.cpu_count(),
+ popsize: Optional[int] = 31,
+ max_evaluations: Optional[int] = 50000,
+ capacity: Optional[int] = 500,
+ stop_fitness: Optional[float] = -np.inf,
+ optimizer: Optional[Optimizer] = None,
+ statistic_num: Optional[int] = 0,
+ ) -> OptimizeResult:
  """Minimization of a scalar function of one or more variables using parallel
  optimization retry.

@@ -43,10 +46,8 @@ def minimize(fun,
  ----------
  fun : callable
  The objective function to be minimized.
- ``fun(x, *args) -> float``
- where ``x`` is an 1-D array with shape (n,) and ``args``
- is a tuple of the fixed parameters needed to completely
- specify the function.
+ ``fun(x) -> float``
+ where ``x`` is an 1-D array with shape (n,)
  bounds : sequence or `Bounds`, optional
  Bounds on variables. There are two ways to specify the bounds:
  1. Instance of the `scipy.Bounds` class.
@@ -56,10 +57,6 @@ def minimize(fun,
  Upper limit for optimized function values to be stored.
  num_retries : int, optional
  Number of optimization retries.
- logger : logger, optional
- logger for log output of the retry mechanism. If None, logging
- is switched off. Default is a logger which logs both to stdout and
- appends to a file ``optimizer.log``.
  workers : int, optional
  number of parallel processes used. Default is mp.cpu_count()
  popsize = int, optional
@@ -78,10 +75,6 @@ def minimize(fun,
  optimizer to use. Default is a sequence of differential evolution and CMA-ES.
  statistic_num: int, optional
  if > 0 stores the progress of the optimization. Defines the size of this store.
- plot_name : String, optional
- if defined plots are generated during the optimization to monitor progress.
- Requires statistic_num > 100.
-

  Returns
  -------
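For orientation, a minimal sketch of calling the reworked 1.6.9 signature above; the objective and parameter values are illustrative and not part of the diff. bounds is now a required argument, and the old logger and plot_name parameters are gone:

    import numpy as np
    from scipy.optimize import Bounds
    from fcmaes import retry

    def rastrigin(x):  # illustrative objective, not part of the package
        x = np.asarray(x)
        return 10 * len(x) + float(np.sum(x * x - 10 * np.cos(2 * np.pi * x)))

    bounds = Bounds([-5.12] * 4, [5.12] * 4)   # lower and upper limit per dimension
    res = retry.minimize(rastrigin, bounds, num_retries=64, workers=4)
    print(res.x, res.fun, res.nfev)            # scipy OptimizeResult, as documented above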
@@ -93,14 +86,18 @@ def minimize(fun,

  if optimizer is None:
  optimizer = de_cma(max_evaluations, popsize, stop_fitness)
- store = Store(fun, bounds, capacity = capacity, logger = logger, statistic_num = statistic_num,
- plot_name = plot_name)
+ store = Store(fun, bounds, capacity = capacity, statistic_num = statistic_num)
  return retry(store, optimizer.minimize, num_retries, value_limit, workers, stop_fitness)
-
- def retry(store, optimize, num_retries, value_limit = math.inf,
- workers=mp.cpu_count(), stop_fitness = -math.inf):
+
+ def retry(store: Store,
+ optimize: Callable,
+ num_retries: int,
+ value_limit: Optional[float] = np.inf,
+ workers: Optional[int] = mp.cpu_count(),
+ stop_fitness: Optional[float] = -np.inf) -> OptimizeResult:
+
  sg = SeedSequence()
- rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]
+ rgs = [Generator(PCG64DXSM(s)) for s in sg.spawn(workers)]
  proc=[Process(target=_retry_loop,
  args=(pid, rgs, store, optimize, num_retries, value_limit, stop_fitness)) for pid in range(workers)]
  [p.start() for p in proc]
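The generator switch above follows numpy's recommended pattern for parallel workers: SeedSequence.spawn yields statistically independent child seeds, each feeding its own PCG64DXSM bit generator (available since numpy 1.21). A standalone sketch of that pattern, with an illustrative worker count:

    from numpy.random import Generator, PCG64DXSM, SeedSequence

    workers = 4
    sg = SeedSequence()                                         # entropy source
    rgs = [Generator(PCG64DXSM(s)) for s in sg.spawn(workers)]  # one independent stream per process
    print([rg.uniform(0.05, 0.1) for rg in rgs])                # distinct draws, as used in _retry_loop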
@@ -110,27 +107,41 @@ def retry(store, optimize, num_retries, value_limit = math.inf,
  return OptimizeResult(x=store.get_x_best(), fun=store.get_y_best(),
  nfev=store.get_count_evals(), success=True)

- def minimize_plot(name, optimizer, fun, bounds, value_limit = math.inf,
- plot_limit = math.inf, num_retries = 1024,
- workers = mp.cpu_count(), logger=logger(),
- stop_fitness = -math.inf, statistic_num = 5000, plot_name = None):
+ def minimize_plot(name: str,
+ optimizer: Optimizer,
+ fun: Callable[[ArrayLike], float],
+ bounds: Bounds,
+ value_limit: Optional[float] = np.inf,
+ plot_limit: Optional[float] = np.inf,
+ num_retries: Optional[int] = 1024,
+ workers: Optional[int] = mp.cpu_count(),
+ stop_fitness: Optional[float] = -np.inf,
+ statistic_num: Optional[int] = 5000) -> OptimizeResult:
+
  time0 = time.perf_counter() # optimization start time
  name += '_' + optimizer.name
  logger.info('optimize ' + name)
- store = Store(fun, bounds, capacity = 500, logger = logger,
- statistic_num = statistic_num, plot_name = plot_name)
+ store = Store(fun, bounds, capacity = 500, statistic_num = statistic_num)
  ret = retry(store, optimizer.minimize, num_retries, value_limit, workers, stop_fitness)
  impr = store.get_improvements()
  np.savez_compressed(name, ys=impr)
- filtered = np.array([imp for imp in impr if imp[1] < plot_limit])
- if len(filtered) > 0: impr = filtered
+ for _ in range(10):
+ filtered = np.array([imp for imp in impr if imp[1] < plot_limit])
+ if len(filtered) > 0:
+ impr = filtered
+ break
+ else:
+ plot_limit *= 3
  logger.info(name + ' time ' + str(dtime(time0)))
  plot(impr, 'progress_ret.' + name + '.png', label = name,
  xlabel = 'time in sec', ylabel = r'$f$')
  return ret

- def plot(front, fname, interp=True, label=r'$\chi$',
- xlabel = r'$f_1$', ylabel = r'$f_2$', zlabel = r'$f_3$', plot3d=False):
+ def plot(front: ArrayLike, fname: str, interp: Optional[bool] = True,
+ label: Optional[str] = r'$\chi$',
+ xlabel: Optional[str] = r'$f_1$', ylabel:Optional[str] = r'$f_2$',
+ zlabel: Optional[str] = r'$f_3$', plot3d: Optional[bool] = False,
+ s = 1, dpi=300):
  if len(front[0]) == 3 and plot3d:
  plot3(front, fname, label, xlabel, ylabel, zlabel)
  return
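A sketch of driving the revised minimize_plot shown above; the sphere objective and argument values are illustrative. The logger and plot_name parameters are gone, and plot_limit is now relaxed by a factor of 3 (up to ten times) until at least one recorded improvement survives the filter:

    import numpy as np
    from scipy.optimize import Bounds
    from fcmaes.optimizer import de_cma
    from fcmaes.retry import minimize_plot

    def sphere(x):  # illustrative objective
        return float(np.sum(np.asarray(x) ** 2))

    bounds = Bounds([-10] * 3, [10] * 3)
    ret = minimize_plot('sphere', de_cma(20000), sphere, bounds,
                        num_retries=32, plot_limit=100.0)
    # writes sphere_<optimizer.name>.npz plus progress_ret.sphere_<optimizer.name>.png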
@@ -139,6 +150,11 @@ def plot(front, fname, interp=True, label=r'$\chi$',
  plot(front.T[np.array([0,i])].T, str(i) + '_' + fname,
  interp=interp, ylabel = r'$f_{0}$'.format(i+1))
  return
+ if len(front[0]) == 1:
+ ys = np.array(list(zip(range(100), [front[0][0]]*100)))
+ plot(ys, str(1) + '_' + fname,
+ interp=interp, xlabel = '', ylabel = r'$f_{0}$'.format(1))
+ return
  import matplotlib.pyplot as pl
  fig, ax = pl.subplots(1, 1)
  x = front[:, 0]; y = front[:, 1]
@@ -152,16 +168,17 @@ def plot(front, fname, interp=True, label=r'$\chi$',
  tck = interpolate.InterpolatedUnivariateSpline(x,y,k=1)
  x = np.linspace(min(x),max(x),1000)
  y = [tck(xi) for xi in x]
- ax.scatter(x, y, label=label, s=1)
+ ax.scatter(x, y, label=label, s=s)
  ax.grid()
  ax.set_xlabel(xlabel)
  ax.set_ylabel(ylabel)
  ax.legend()
- fig.savefig(fname, dpi=300)
+ fig.savefig(fname, dpi=dpi)
  pl.close('all')

- def plot3(front, fname, label=r'$\chi$',
- xlabel = r'$f_1$', ylabel = r'$f_2$', zlabel = r'$f_3$'):
+ def plot3(front: ArrayLike, fname: str, label: Optional[str] =r'$\chi$',
+ xlabel: Optional[str] = r'$f_1$', ylabel: Optional[str] = r'$f_2$',
+ zlabel: Optional[str] = r'$f_3$'):
  import matplotlib.pyplot as pl
  fig = pl.figure()
  ax = fig.add_subplot(projection='3d')
@@ -175,22 +192,44 @@ def plot3(front, fname, label=r'$\chi$',
  #pl.show()
  fig.savefig(fname, dpi=300)
  pl.close('all')
+
+
+ dtype_map = {
+ 'int32': ct.c_int32,
+ 'int64': ct.c_int64,
+ 'float32': ct.c_float,
+ 'float64': ct.c_double,
+ }
+
+ class Shared2d():
+
+ def __init__(self, xs):
+ self.rows, self.cols = xs.shape
+ self.dtype = xs.dtype
+ self.ra = mp.RawArray(dtype_map[str(xs.dtype)], self.rows*self.cols)
+ self.set(xs)
+
+ def set_i(self, i, x):
+ self.view()[i, :] = x
+
+ def view(self):
+ return np.frombuffer(self.ra, dtype=self.dtype).reshape((self.rows, self.cols))
+
+ def set(self, xs):
+ np.copyto(self.view(), xs)

  class Store(object):
  """thread safe storage for optimization retry results."""

  def __init__(self,
- fun, # fitness function
- bounds, # bounds of the objective function arguments
- check_interval = 10, # sort evaluation memory after check_interval iterations
- capacity = 500, # capacity of the evaluation store
- logger = None, # if None logging is switched off
- statistic_num = 0,
- plot_name = None # requires statistic_num > 500
+ fun: Callable[[ArrayLike], float], # fitness function
+ bounds: Bounds, # bounds of the objective function arguments
+ check_interval: Optional[int] = 10, # sort evaluation memory after check_interval iterations
+ capacity: Optional[int] = 500, # capacity of the evaluation store
+ statistic_num: Optional[int] = 0
  ):
  self.fun = fun
  self.lower, self.upper = _convertBounds(bounds)
- self.logger = logger
  self.capacity = capacity
  self.check_interval = check_interval
  self.dim = len(self.lower)
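Shared2d, added above, wraps a flat multiprocessing.RawArray as a 2-D numpy view so the store's x-vectors can be shared across worker processes without copying or pickling the data. An illustrative round trip (the fill_row helper and array sizes are made up for the example):

    import multiprocessing as mp
    import numpy as np
    from fcmaes.retry import Shared2d

    def fill_row(shared, i, value):
        shared.set_i(i, np.full(shared.cols, value))  # writes through the shared buffer

    if __name__ == '__main__':
        shared = Shared2d(np.zeros((4, 3), dtype=np.float64))
        p = mp.Process(target=fill_row, args=(shared, 2, 7.0))
        p.start(); p.join()
        print(shared.view())   # row 2 now holds 7.0 in the parent process as well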
@@ -200,31 +239,30 @@ class Store(object):

  #shared between processes
  self.add_mutex = mp.Lock()
- self.xs = mp.RawArray(ct.c_double, self.capacity * self.dim)
+ self.xs = Shared2d(np.empty((self.capacity, self.dim), dtype = np.float64))
+ self.create_xs_view()
  self.ys = mp.RawArray(ct.c_double, self.capacity)
  self.count_evals = mp.RawValue(ct.c_long, 0)
  self.count_runs = mp.RawValue(ct.c_int, 0)
- self.num_stored = mp.RawValue(ct.c_int, 0)
- self.num_sorted = mp.RawValue(ct.c_int, 0)
+ self.num_stored = mp.RawValue(ct.c_int, 0)
  self.count_stat_runs = mp.RawValue(ct.c_int, 0)
  self.t0 = time.perf_counter()
  self.mean = mp.RawValue(ct.c_double, 0)
  self.qmean = mp.RawValue(ct.c_double, 0)
- self.best_y = mp.RawValue(ct.c_double, math.inf)
+ self.best_y = mp.RawValue(ct.c_double, np.inf)
  self.best_x = mp.RawArray(ct.c_double, self.dim)
  self.statistic_num = statistic_num
- self.plot_name = plot_name
- # statistics
+ # statistics
+ self.statistic_num = statistic_num
  if statistic_num > 0: # enable statistics
- self.statistic_num = statistic_num
  self.time = mp.RawArray(ct.c_double, self.statistic_num)
  self.val = mp.RawArray(ct.c_double, self.statistic_num)
  self.si = mp.RawValue(ct.c_int, 0)
  self.sevals = mp.RawValue(ct.c_long, 0)
- self.bval = mp.RawValue(ct.c_double, math.inf)
+ self.bval = mp.RawValue(ct.c_double, np.inf)

  # register improvement - time and value
- def wrapper(self, x):
+ def wrapper(self, x: ArrayLike):
  y = self.fun(x)
  self.sevals.value += 1
  if y < self.bval.value:
@@ -234,18 +272,18 @@ class Store(object):
  self.si.value = si + 1
  self.time[si] = dtime(self.t0)
  self.val[si] = y
- if not self.logger is None:
- self.logger.info(str(self.time[si]) + ' ' +
- str(self.sevals.value) + ' ' +
- str(y) + ' ' +
- str(list(x)))
+ logger.trace(str(self.time[si]) + ' ' +
+ str(self.sevals.value) + ' ' +
+ str(int(self.sevals.value / self.time[si])) + ' ' +
+ str(y) + ' ' +
+ str(list(x)))
  return y

  def get_improvements(self):
  return np.array(list(zip(self.time[:self.si.value], self.val[:self.si.value])))

  # get num best values at evenly distributed times
- def get_statistics(self, num):
+ def get_statistics(self, num: int) -> List:
  ts = self.time[:self.si.value]
  ys = self.val[:self.si.value]
  mt = ts[-1]
@@ -260,88 +298,83 @@ class Store(object):
  conv.append(val)
  return conv

- def eval_num(self, max_evals):
+ def eval_num(self, max_evals: int) -> int:
  return max_evals

- def replace(self, i, y, xs):
+ def replace(self, i: int, y: float, xs: ArrayLike):
  self.set_y(i, y)
  self.set_x(i, xs)

- def sort(self): # sort all entries to make room for new ones, determine best and worst
+ def sort(self) -> int: # sort all entries to make room for new ones, determine best and worst
  """sorts all store entries, keep only the 90% best to make room for new ones."""
  ns = self.num_stored.value
  ys = np.asarray(self.ys[:ns])
  yi = ys.argsort()
- sortRuns = []
- for i in range(len(yi)):
- y = ys[yi[i]]
- xs = self.get_x(yi[i])
- sortRuns.append((y, xs))
- numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best
- for i in range(numStored):
- self.replace(i, sortRuns[i][0], sortRuns[i][1])
- self.num_sorted.value = numStored
+ numStored = min(ns, int(0.9*self.capacity)) # keep 90% best
+ self.xs_view[:numStored] = self.xs_view[yi][:numStored]
+ self.ys[:numStored] = ys[yi][:numStored]
  self.num_stored.value = numStored
  return numStored

- def add_result(self, y, xs, evals, limit=math.inf):
- """registers an optimization result at the score."""
+ def add_result(self, y: float, x: ArrayLike, evals: int, limit=np.inf):
+ """registers an optimization result at the store."""
  with self.add_mutex:
  self.incr_count_evals(evals)
  if y < limit:
  self.count_stat_runs.value += 1
  if y < self.best_y.value:
  self.best_y.value = y
- self.best_x[:] = xs[:]
+ self.best_x[:] = x[:]
  self.dump()
  if self.num_stored.value >= self.capacity-1:
  self.sort()
  cnt = self.count_stat_runs.value
- diff = y - self.mean.value
- self.qmean.value += (cnt - 1) * diff*diff / cnt;
+ diff = min(1E20, y - self.mean.value) # avoid overflow
+ self.qmean.value += (cnt - 1)/ cnt * diff*diff ;
  self.mean.value += diff / cnt
  ns = self.num_stored.value
  self.num_stored.value = ns + 1
- self.replace(ns, y, xs)
-
- def get_x(self, pid):
- return self.xs[pid*self.dim:(pid+1)*self.dim]
+ self.xs_view[self.num_stored.value, :] = x
+ self.ys[self.num_stored.value] = y
+ if is_debug_active():
+ dt = dtime(self.t0)
+ message = '{0} {1} {2} {3} {4:.6f} {5:.6f} {6:.2f} {7:.2f}'.format(
+ dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value, \
+ y, self.best_y.value, self.get_y_mean(), self.get_y_standard_dev())
+ logger.debug(message)

- def get_x_best(self):
+ def get_x_best(self) -> np.ndarray:
  return np.array(self.best_x[:])

- def get_xs(self):
- return np.array([self.get_x(i) for i in range(self.num_stored.value)])
-
- def get_y(self, pid):
+ def create_xs_view(self): # needs to be called in the target process
+ self.xs_view = self.xs.view()
+
+ def get_xs(self) -> np.ndarray:
+ return self.xs.view()[:self.num_stored.value]
+
+ def get_y(self, pid: int) -> float:
  return self.ys[pid]

- def get_y_best(self):
+ def get_y_best(self) -> float:
  return self.best_y.value

- def get_ys(self):
+ def get_ys(self) -> np.ndarray:
  return np.array(self.ys[:self.num_stored.value])

- def get_y_mean(self):
+ def get_y_mean(self) -> float:
  return self.mean.value

- def get_y_standard_dev(self):
+ def get_y_standard_dev(self) -> float:
  cnt = self.count_stat_runs.value
  return 0 if cnt <= 0 else math.sqrt(self.qmean.value / cnt)

- def get_count_evals(self):
+ def get_count_evals(self) -> int:
  return self.count_evals.value

- def get_count_runs(self):
+ def get_count_runs(self) -> int:
  return self.count_runs.value
-
- def set_x(self, pid, xs):
- self.xs[pid*self.dim:(pid+1)*self.dim] = xs[:]
-
- def set_y(self, pid, y):
- self.ys[pid] = y
-
- def get_runs_compare_incr(self, limit):
+
+ def get_runs_compare_incr(self, limit: float):
  with self.add_mutex:
  if self.count_runs.value < limit:
  self.count_runs.value += 1
@@ -356,43 +389,32 @@ class Store(object):

  def dump(self):
  """logs the current status of the store if logger defined."""
- if self.logger is None:
+ if not is_debug_active():
  return
  Ys = self.get_ys()
  vals = []
  for i in range(min(20, len(Ys))):
- vals.append(round(Ys[i],2))
- dt = dtime(self.t0)
-
+ vals.append(round(Ys[i],4))
+ dt = dtime(self.t0)
  message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6:.2f} {7!s} {8!s}'.format(
  dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value, \
  self.best_y.value, self.get_y_mean(), self.get_y_standard_dev(), vals, self.best_x[:])
- self.logger.info(message)
+ logger.debug(message)


- def _retry_loop(pid, rgs, store, optimize, num_retries, value_limit, stop_fitness = -math.inf):
- fun = store.wrapper if store.statistic_num > 0 else store.fun
- #reinitialize logging config for windows - multi threading fix
- if 'win' in sys.platform and not store.logger is None:
- store.logger = logger()
-
+ def _retry_loop(pid, rgs, store, optimize, num_retries, value_limit, stop_fitness = -np.inf):
+ store.create_xs_view()
+ fun = store.wrapper if store.statistic_num > 0 else store.fun
  lower = store.lower
- while store.get_runs_compare_incr(num_retries) and store.best_y.value > stop_fitness:
- try:
- rg = rgs[pid]
- sol, y, evals = optimize(fun, Bounds(store.lower, store.upper), None,
- [rg.uniform(0.05, 0.1)]*len(lower), rg, store)
- store.add_result(y, sol, evals, value_limit)
- if not store.plot_name is None:
- name = store.plot_name + "_retry_" + str(store.get_count_evals())
- xs = np.array(store.get_xs())
- ys = np.array(store.get_ys())
- np.savez_compressed(name, xs=xs, ys=ys)
- plot(y, name, interp=False)
- except Exception as ex:
- print(str(ex))
- # if pid == 0:
- # store.dump()
+ with threadpoolctl.threadpool_limits(limits=1, user_api="blas"):
+ while store.get_runs_compare_incr(num_retries) and store.best_y.value > stop_fitness:
+ try:
+ rg = rgs[pid]
+ sol, y, evals = optimize(fun, Bounds(store.lower, store.upper), None,
+ [rg.uniform(0.05, 0.1)]*len(lower), rg, store)
+ store.add_result(y, sol, evals, value_limit)
+ except Exception as ex:
+ print(str(ex))

  def _convertBounds(bounds):
  if bounds is None:
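Two behavioural notes on the rewritten _retry_loop and dump above: each worker process now pins its BLAS to one thread via threadpoolctl.threadpool_limits, and all retry output goes through loguru, gated by is_debug_active()/is_trace_active() from fcmaes.evaluator instead of the removed logger argument. A sketch for turning that output back on, assuming the gates reflect the level of the configured loguru sinks:

    import sys
    from loguru import logger

    logger.remove()                            # drop loguru's default stderr sink
    logger.add(sys.stdout, level="DEBUG")      # show Store.dump()/add_result() progress lines
    # logger.add("retry.log", level="TRACE")   # optionally capture per-improvement traces as well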
fcmaes/test_cma.py CHANGED
@@ -3,12 +3,12 @@
  # This source code is licensed under the MIT license found in the
  # LICENSE file in the root directory.

- import sys
  import multiprocessing as mp
  import numpy as np
  from scipy.optimize import OptimizeResult
  from fcmaes.testfun import Wrapper, Rosen, Rastrigin, Eggholder
- from fcmaes import cmaes, de, decpp, cmaescpp, gcldecpp, retry, advretry
+ from fcmaes import cmaes, de, decpp, cmaescpp, retry, advretry
+ from fcmaes.optimizer import de_cma_py

  def almost_equal(X1, X2, eps = 1E-5):
  if np.isscalar(X1):
@@ -168,27 +168,6 @@ def test_rosen_cpp_parallel():
  assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
  assert(almost_equal(ret.fun, wrapper.get_best_y(), eps = 1E-1)) # wrong best y returned

- def test_rosen_gclde_parallel():
- popsize = 8
- dim = 2
- testfun = Rosen(dim)
- max_eval = 10000
- limit = 0.00001
- for _ in range(5):
- wrapper = Wrapper(testfun.fun, dim)
- ret = gcldecpp.minimize(wrapper.eval, dim, testfun.bounds,
- max_evaluations = max_eval,
- popsize=popsize, workers = mp.cpu_count())
- if limit > ret.fun:
- break
-
- assert(limit > ret.fun) # optimization target not reached
- assert(max_eval + popsize >= ret.nfev) # too much function calls
- assert(max_eval // popsize + 2 > ret.nit) # too much iterations
- assert(ret.nfev == wrapper.get_count()) # wrong number of function calls returned
- assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
- assert(ret.fun == wrapper.get_best_y()) # wrong best y returned
-
  def test_rosen_de():
  popsize = 8
  dim = 2
@@ -339,15 +318,54 @@ def test_eggholder_retry():
  assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
  assert(ret.fun == wrapper.get_best_y()) # wrong best y returned

+ from fcmaes.optimizer import de_cma
+
  def test_eggholder_advanced_retry():
  dim = 2
  testfun = Eggholder()
+
+ limit = -956
+ for _ in range(5):
+ wrapper = Wrapper(testfun.fun, dim)
+ ret = advretry.minimize(wrapper.eval, testfun.bounds,
+ num_retries=96)
+ if limit > ret.fun:
+ break
+
+ assert(limit > ret.fun) # optimization target not reached
+ assert(ret.nfev == wrapper.get_count()) # wrong number of function calls returned
+ assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
+ assert(almost_equal(ret.fun, wrapper.get_best_y())) # wrong best y returned
+
+ def test_eggholder_retry_python():
+ dim = 2
+ testfun = Eggholder()
+
+ optimizer = de_cma_py(10000)
+ limit = -956
+ for _ in range(5):
+ wrapper = Wrapper(testfun.fun, dim)

+ ret = retry.minimize(wrapper.eval, testfun.bounds,
+ num_retries=32, optimizer = optimizer)
+ if limit > ret.fun:
+ break
+
+ assert(limit > ret.fun) # optimization target not reached
+ assert(ret.nfev == wrapper.get_count()) # wrong number of function calls returned
+ assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
+ assert(ret.fun == wrapper.get_best_y()) # wrong best y returned
+
+ def test_eggholder_advanced_retry_python():
+ dim = 2
+ testfun = Eggholder()
+
+ optimizer = de_cma_py(10000)
  limit = -956
  for _ in range(5):
  wrapper = Wrapper(testfun.fun, dim)
  ret = advretry.minimize(wrapper.eval, testfun.bounds,
- num_retries=300)
+ num_retries=32, optimizer = optimizer)
  if limit > ret.fun:
  break

@@ -355,4 +373,6 @@ def test_eggholder_advanced_retry():
  assert(ret.nfev == wrapper.get_count()) # wrong number of function calls returned
  assert(almost_equal(ret.x, wrapper.get_best_x())) # wrong best X returned
  assert(almost_equal(ret.fun, wrapper.get_best_y())) # wrong best y returned
-
+
+ #test_rosen_decpp_parallel()
+
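The new test_eggholder_retry_python and test_eggholder_advanced_retry_python cases above exercise the pure-Python optimizer chain de_cma_py as an alternative to the C++-backed default. The same pattern works outside the test suite; a sketch with an illustrative objective:

    import numpy as np
    from scipy.optimize import Bounds
    from fcmaes import retry, advretry
    from fcmaes.optimizer import de_cma_py

    def rosen(x):  # illustrative objective
        x = np.asarray(x)
        return float(np.sum(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2))

    bounds = Bounds([-5] * 3, [5] * 3)
    opt = de_cma_py(10000)  # pure-Python DE + CMA-ES chain, 10000 evaluation budget as in the tests
    print(retry.minimize(rosen, bounds, num_retries=32, optimizer=opt).fun)
    print(advretry.minimize(rosen, bounds, num_retries=32, optimizer=opt).fun)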
fcmaes-1.6.9.dist-info/METADATA ADDED
@@ -0,0 +1,47 @@
+ Metadata-Version: 2.1
+ Name: fcmaes
+ Version: 1.6.9
+ Summary: A Python 3 gradient-free optimization library.
+ Home-page: https://github.com/dietmarwo/fast-cma-es
+ Author: Dietmar Wolz
+ Author-email: drdietmarwolz@yahoo.de
+ License: MIT
+ Keywords: optimization,multi-objective,parallel
+ Classifier: Intended Audience :: Manufacturing
+ Classifier: Intended Audience :: Financial and Insurance Industry
+ Classifier: Intended Audience :: Healthcare Industry
+ Classifier: Intended Audience :: Telecommunications Industry
+ Classifier: Intended Audience :: Information Technology
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Intended Audience :: Education
+ Classifier: Topic :: Office/Business :: Financial
+ Classifier: Topic :: Office/Business :: Scheduling
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Environment :: Console
+ Classifier: License :: OSI Approved :: MIT License
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: numpy
+ Requires-Dist: scipy
+ Requires-Dist: scikit-learn
+ Requires-Dist: threadpoolctl
+ Requires-Dist: numba
+ Requires-Dist: loguru
+
+ # fcmaes
+ A Python 3 gradient-free optimization library.
+
+ - [README](https://github.com/dietmarwo/fast-cma-es/blob/master/README.adoc)
+ - [Tutorials](https://github.com/dietmarwo/fast-cma-es/blob/master/tutorials/Tutorials.adoc)
+
+
+
+
+