pymoo-0.6.1.5.dev0-cp313-cp313-macosx_10_13_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pymoo might be problematic.
- pymoo/__init__.py +3 -0
- pymoo/algorithms/__init__.py +0 -0
- pymoo/algorithms/base/__init__.py +0 -0
- pymoo/algorithms/base/bracket.py +38 -0
- pymoo/algorithms/base/genetic.py +109 -0
- pymoo/algorithms/base/line.py +62 -0
- pymoo/algorithms/base/local.py +39 -0
- pymoo/algorithms/base/meta.py +79 -0
- pymoo/algorithms/hyperparameters.py +89 -0
- pymoo/algorithms/moo/__init__.py +0 -0
- pymoo/algorithms/moo/age.py +310 -0
- pymoo/algorithms/moo/age2.py +194 -0
- pymoo/algorithms/moo/ctaea.py +298 -0
- pymoo/algorithms/moo/dnsga2.py +76 -0
- pymoo/algorithms/moo/kgb.py +446 -0
- pymoo/algorithms/moo/moead.py +183 -0
- pymoo/algorithms/moo/nsga2.py +113 -0
- pymoo/algorithms/moo/nsga3.py +358 -0
- pymoo/algorithms/moo/pinsga2.py +370 -0
- pymoo/algorithms/moo/rnsga2.py +188 -0
- pymoo/algorithms/moo/rnsga3.py +246 -0
- pymoo/algorithms/moo/rvea.py +214 -0
- pymoo/algorithms/moo/sms.py +195 -0
- pymoo/algorithms/moo/spea2.py +190 -0
- pymoo/algorithms/moo/unsga3.py +47 -0
- pymoo/algorithms/soo/__init__.py +0 -0
- pymoo/algorithms/soo/convex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/brkga.py +161 -0
- pymoo/algorithms/soo/nonconvex/cmaes.py +554 -0
- pymoo/algorithms/soo/nonconvex/de.py +279 -0
- pymoo/algorithms/soo/nonconvex/direct.py +149 -0
- pymoo/algorithms/soo/nonconvex/es.py +203 -0
- pymoo/algorithms/soo/nonconvex/g3pcx.py +94 -0
- pymoo/algorithms/soo/nonconvex/ga.py +93 -0
- pymoo/algorithms/soo/nonconvex/ga_niching.py +223 -0
- pymoo/algorithms/soo/nonconvex/isres.py +74 -0
- pymoo/algorithms/soo/nonconvex/nelder.py +251 -0
- pymoo/algorithms/soo/nonconvex/optuna.py +80 -0
- pymoo/algorithms/soo/nonconvex/pattern.py +183 -0
- pymoo/algorithms/soo/nonconvex/pso.py +399 -0
- pymoo/algorithms/soo/nonconvex/pso_ep.py +297 -0
- pymoo/algorithms/soo/nonconvex/random_search.py +25 -0
- pymoo/algorithms/soo/nonconvex/sres.py +56 -0
- pymoo/algorithms/soo/univariate/__init__.py +0 -0
- pymoo/algorithms/soo/univariate/backtracking.py +59 -0
- pymoo/algorithms/soo/univariate/exp.py +46 -0
- pymoo/algorithms/soo/univariate/golden.py +65 -0
- pymoo/algorithms/soo/univariate/quadr_interp.py +81 -0
- pymoo/algorithms/soo/univariate/wolfe.py +163 -0
- pymoo/config.py +33 -0
- pymoo/constraints/__init__.py +3 -0
- pymoo/constraints/adaptive.py +62 -0
- pymoo/constraints/as_obj.py +56 -0
- pymoo/constraints/as_penalty.py +41 -0
- pymoo/constraints/eps.py +26 -0
- pymoo/constraints/from_bounds.py +36 -0
- pymoo/core/__init__.py +0 -0
- pymoo/core/algorithm.py +394 -0
- pymoo/core/callback.py +38 -0
- pymoo/core/crossover.py +77 -0
- pymoo/core/decision_making.py +102 -0
- pymoo/core/decomposition.py +76 -0
- pymoo/core/duplicate.py +163 -0
- pymoo/core/evaluator.py +116 -0
- pymoo/core/indicator.py +34 -0
- pymoo/core/individual.py +784 -0
- pymoo/core/infill.py +64 -0
- pymoo/core/initialization.py +42 -0
- pymoo/core/mating.py +39 -0
- pymoo/core/meta.py +21 -0
- pymoo/core/mixed.py +165 -0
- pymoo/core/mutation.py +44 -0
- pymoo/core/operator.py +40 -0
- pymoo/core/parameters.py +134 -0
- pymoo/core/plot.py +210 -0
- pymoo/core/population.py +180 -0
- pymoo/core/problem.py +460 -0
- pymoo/core/recorder.py +99 -0
- pymoo/core/repair.py +23 -0
- pymoo/core/replacement.py +96 -0
- pymoo/core/result.py +52 -0
- pymoo/core/sampling.py +43 -0
- pymoo/core/selection.py +61 -0
- pymoo/core/solution.py +10 -0
- pymoo/core/survival.py +103 -0
- pymoo/core/termination.py +70 -0
- pymoo/core/variable.py +399 -0
- pymoo/cython/__init__.py +0 -0
- pymoo/cython/calc_perpendicular_distance.cpython-313-darwin.so +0 -0
- pymoo/cython/calc_perpendicular_distance.pyx +67 -0
- pymoo/cython/decomposition.cpython-313-darwin.so +0 -0
- pymoo/cython/decomposition.pyx +165 -0
- pymoo/cython/hv.cpython-313-darwin.so +0 -0
- pymoo/cython/hv.pyx +18 -0
- pymoo/cython/info.cpython-313-darwin.so +0 -0
- pymoo/cython/info.pyx +5 -0
- pymoo/cython/mnn.cpython-313-darwin.so +0 -0
- pymoo/cython/mnn.pyx +273 -0
- pymoo/cython/non_dominated_sorting.cpython-313-darwin.so +0 -0
- pymoo/cython/non_dominated_sorting.pyx +645 -0
- pymoo/cython/pruning_cd.cpython-313-darwin.so +0 -0
- pymoo/cython/pruning_cd.pyx +197 -0
- pymoo/cython/stochastic_ranking.cpython-313-darwin.so +0 -0
- pymoo/cython/stochastic_ranking.pyx +49 -0
- pymoo/cython/utils.pxd +129 -0
- pymoo/cython/vendor/__init__.py +0 -0
- pymoo/cython/vendor/hypervolume.cpp +1621 -0
- pymoo/cython/vendor/hypervolume.h +63 -0
- pymoo/decomposition/__init__.py +0 -0
- pymoo/decomposition/aasf.py +24 -0
- pymoo/decomposition/asf.py +10 -0
- pymoo/decomposition/pbi.py +13 -0
- pymoo/decomposition/perp_dist.py +13 -0
- pymoo/decomposition/tchebicheff.py +11 -0
- pymoo/decomposition/util.py +13 -0
- pymoo/decomposition/weighted_sum.py +8 -0
- pymoo/docs.py +187 -0
- pymoo/experimental/__init__.py +0 -0
- pymoo/experimental/algorithms/__init__.py +0 -0
- pymoo/experimental/algorithms/gde3.py +57 -0
- pymoo/gradient/__init__.py +21 -0
- pymoo/gradient/automatic.py +57 -0
- pymoo/gradient/grad_autograd.py +105 -0
- pymoo/gradient/grad_complex.py +35 -0
- pymoo/gradient/grad_jax.py +51 -0
- pymoo/gradient/toolbox/__init__.py +6 -0
- pymoo/indicators/__init__.py +0 -0
- pymoo/indicators/distance_indicator.py +55 -0
- pymoo/indicators/gd.py +7 -0
- pymoo/indicators/gd_plus.py +7 -0
- pymoo/indicators/hv/__init__.py +63 -0
- pymoo/indicators/hv/exact.py +71 -0
- pymoo/indicators/hv/exact_2d.py +102 -0
- pymoo/indicators/hv/monte_carlo.py +74 -0
- pymoo/indicators/igd.py +7 -0
- pymoo/indicators/igd_plus.py +7 -0
- pymoo/indicators/kktpm.py +151 -0
- pymoo/indicators/migd.py +55 -0
- pymoo/indicators/rmetric.py +203 -0
- pymoo/indicators/spacing.py +52 -0
- pymoo/mcdm/__init__.py +0 -0
- pymoo/mcdm/compromise_programming.py +19 -0
- pymoo/mcdm/high_tradeoff.py +40 -0
- pymoo/mcdm/pseudo_weights.py +32 -0
- pymoo/operators/__init__.py +0 -0
- pymoo/operators/control.py +187 -0
- pymoo/operators/crossover/__init__.py +0 -0
- pymoo/operators/crossover/binx.py +45 -0
- pymoo/operators/crossover/dex.py +122 -0
- pymoo/operators/crossover/erx.py +162 -0
- pymoo/operators/crossover/expx.py +51 -0
- pymoo/operators/crossover/hux.py +37 -0
- pymoo/operators/crossover/nox.py +13 -0
- pymoo/operators/crossover/ox.py +84 -0
- pymoo/operators/crossover/pcx.py +82 -0
- pymoo/operators/crossover/pntx.py +49 -0
- pymoo/operators/crossover/sbx.py +125 -0
- pymoo/operators/crossover/spx.py +5 -0
- pymoo/operators/crossover/ux.py +20 -0
- pymoo/operators/mutation/__init__.py +0 -0
- pymoo/operators/mutation/bitflip.py +17 -0
- pymoo/operators/mutation/gauss.py +58 -0
- pymoo/operators/mutation/inversion.py +42 -0
- pymoo/operators/mutation/nom.py +7 -0
- pymoo/operators/mutation/pm.py +94 -0
- pymoo/operators/mutation/rm.py +23 -0
- pymoo/operators/repair/__init__.py +0 -0
- pymoo/operators/repair/bounce_back.py +32 -0
- pymoo/operators/repair/bounds_repair.py +95 -0
- pymoo/operators/repair/inverse_penalty.py +89 -0
- pymoo/operators/repair/rounding.py +18 -0
- pymoo/operators/repair/to_bound.py +31 -0
- pymoo/operators/repair/vtype.py +11 -0
- pymoo/operators/sampling/__init__.py +0 -0
- pymoo/operators/sampling/lhs.py +73 -0
- pymoo/operators/sampling/rnd.py +50 -0
- pymoo/operators/selection/__init__.py +0 -0
- pymoo/operators/selection/rnd.py +72 -0
- pymoo/operators/selection/tournament.py +76 -0
- pymoo/operators/survival/__init__.py +0 -0
- pymoo/operators/survival/rank_and_crowding/__init__.py +1 -0
- pymoo/operators/survival/rank_and_crowding/classes.py +209 -0
- pymoo/operators/survival/rank_and_crowding/metrics.py +208 -0
- pymoo/optimize.py +72 -0
- pymoo/problems/__init__.py +157 -0
- pymoo/problems/dyn.py +47 -0
- pymoo/problems/dynamic/__init__.py +0 -0
- pymoo/problems/dynamic/cec2015.py +108 -0
- pymoo/problems/dynamic/df.py +452 -0
- pymoo/problems/dynamic/misc.py +167 -0
- pymoo/problems/functional.py +48 -0
- pymoo/problems/many/__init__.py +5 -0
- pymoo/problems/many/cdtlz.py +159 -0
- pymoo/problems/many/dcdtlz.py +88 -0
- pymoo/problems/many/dtlz.py +264 -0
- pymoo/problems/many/wfg.py +550 -0
- pymoo/problems/multi/__init__.py +14 -0
- pymoo/problems/multi/bnh.py +34 -0
- pymoo/problems/multi/carside.py +48 -0
- pymoo/problems/multi/clutch.py +104 -0
- pymoo/problems/multi/csi.py +55 -0
- pymoo/problems/multi/ctp.py +198 -0
- pymoo/problems/multi/dascmop.py +213 -0
- pymoo/problems/multi/kursawe.py +25 -0
- pymoo/problems/multi/modact.py +68 -0
- pymoo/problems/multi/mw.py +400 -0
- pymoo/problems/multi/omnitest.py +48 -0
- pymoo/problems/multi/osy.py +32 -0
- pymoo/problems/multi/srn.py +28 -0
- pymoo/problems/multi/sympart.py +94 -0
- pymoo/problems/multi/tnk.py +24 -0
- pymoo/problems/multi/truss2d.py +83 -0
- pymoo/problems/multi/welded_beam.py +41 -0
- pymoo/problems/multi/wrm.py +36 -0
- pymoo/problems/multi/zdt.py +151 -0
- pymoo/problems/multi_to_single.py +22 -0
- pymoo/problems/single/__init__.py +12 -0
- pymoo/problems/single/ackley.py +24 -0
- pymoo/problems/single/cantilevered_beam.py +34 -0
- pymoo/problems/single/flowshop_scheduling.py +112 -0
- pymoo/problems/single/g.py +874 -0
- pymoo/problems/single/griewank.py +18 -0
- pymoo/problems/single/himmelblau.py +15 -0
- pymoo/problems/single/knapsack.py +48 -0
- pymoo/problems/single/mopta08.py +26 -0
- pymoo/problems/single/multimodal.py +20 -0
- pymoo/problems/single/pressure_vessel.py +30 -0
- pymoo/problems/single/rastrigin.py +20 -0
- pymoo/problems/single/rosenbrock.py +22 -0
- pymoo/problems/single/schwefel.py +18 -0
- pymoo/problems/single/simple.py +13 -0
- pymoo/problems/single/sphere.py +19 -0
- pymoo/problems/single/traveling_salesman.py +79 -0
- pymoo/problems/single/zakharov.py +19 -0
- pymoo/problems/static.py +14 -0
- pymoo/problems/util.py +42 -0
- pymoo/problems/zero_to_one.py +27 -0
- pymoo/termination/__init__.py +23 -0
- pymoo/termination/collection.py +12 -0
- pymoo/termination/cv.py +48 -0
- pymoo/termination/default.py +45 -0
- pymoo/termination/delta.py +64 -0
- pymoo/termination/fmin.py +16 -0
- pymoo/termination/ftol.py +144 -0
- pymoo/termination/indicator.py +49 -0
- pymoo/termination/max_eval.py +14 -0
- pymoo/termination/max_gen.py +15 -0
- pymoo/termination/max_time.py +20 -0
- pymoo/termination/robust.py +34 -0
- pymoo/termination/xtol.py +33 -0
- pymoo/util/__init__.py +0 -0
- pymoo/util/archive.py +150 -0
- pymoo/util/cache.py +29 -0
- pymoo/util/clearing.py +82 -0
- pymoo/util/display/__init__.py +0 -0
- pymoo/util/display/column.py +52 -0
- pymoo/util/display/display.py +34 -0
- pymoo/util/display/multi.py +96 -0
- pymoo/util/display/output.py +53 -0
- pymoo/util/display/progress.py +54 -0
- pymoo/util/display/single.py +67 -0
- pymoo/util/dominator.py +67 -0
- pymoo/util/function_loader.py +129 -0
- pymoo/util/hv.py +23 -0
- pymoo/util/matlab_engine.py +39 -0
- pymoo/util/misc.py +460 -0
- pymoo/util/mnn.py +70 -0
- pymoo/util/nds/__init__.py +0 -0
- pymoo/util/nds/dominance_degree_non_dominated_sort.py +159 -0
- pymoo/util/nds/efficient_non_dominated_sort.py +152 -0
- pymoo/util/nds/fast_non_dominated_sort.py +70 -0
- pymoo/util/nds/naive_non_dominated_sort.py +36 -0
- pymoo/util/nds/non_dominated_sorting.py +67 -0
- pymoo/util/nds/tree_based_non_dominated_sort.py +133 -0
- pymoo/util/normalization.py +312 -0
- pymoo/util/optimum.py +42 -0
- pymoo/util/plotting.py +177 -0
- pymoo/util/pruning_cd.py +89 -0
- pymoo/util/randomized_argsort.py +60 -0
- pymoo/util/ref_dirs/__init__.py +24 -0
- pymoo/util/ref_dirs/construction.py +88 -0
- pymoo/util/ref_dirs/das_dennis.py +52 -0
- pymoo/util/ref_dirs/energy.py +319 -0
- pymoo/util/ref_dirs/energy_layer.py +119 -0
- pymoo/util/ref_dirs/genetic_algorithm.py +63 -0
- pymoo/util/ref_dirs/incremental.py +68 -0
- pymoo/util/ref_dirs/misc.py +128 -0
- pymoo/util/ref_dirs/optimizer.py +59 -0
- pymoo/util/ref_dirs/performance.py +162 -0
- pymoo/util/ref_dirs/reduction.py +85 -0
- pymoo/util/ref_dirs/sample_and_map.py +24 -0
- pymoo/util/reference_direction.py +260 -0
- pymoo/util/remote.py +55 -0
- pymoo/util/roulette.py +27 -0
- pymoo/util/running_metric.py +128 -0
- pymoo/util/sliding_window.py +25 -0
- pymoo/util/stochastic_ranking.py +32 -0
- pymoo/util/value_functions.py +719 -0
- pymoo/util/vectors.py +40 -0
- pymoo/util/vf_dominator.py +99 -0
- pymoo/vendor/__init__.py +0 -0
- pymoo/vendor/cec2018.py +398 -0
- pymoo/vendor/gta.py +617 -0
- pymoo/vendor/hv.py +267 -0
- pymoo/vendor/vendor_cmaes.py +412 -0
- pymoo/vendor/vendor_coco.py +81 -0
- pymoo/vendor/vendor_scipy.py +232 -0
- pymoo/version.py +1 -0
- pymoo/visualization/__init__.py +8 -0
- pymoo/visualization/fitness_landscape.py +127 -0
- pymoo/visualization/heatmap.py +123 -0
- pymoo/visualization/pcp.py +120 -0
- pymoo/visualization/petal.py +91 -0
- pymoo/visualization/radar.py +108 -0
- pymoo/visualization/radviz.py +68 -0
- pymoo/visualization/scatter.py +150 -0
- pymoo/visualization/star_coordinate.py +75 -0
- pymoo/visualization/util.py +123 -0
- pymoo/visualization/video/__init__.py +0 -0
- pymoo/visualization/video/callback_video.py +82 -0
- pymoo/visualization/video/one_var_one_obj.py +57 -0
- pymoo/visualization/video/two_var_one_obj.py +62 -0
- pymoo-0.6.1.5.dev0.dist-info/METADATA +187 -0
- pymoo-0.6.1.5.dev0.dist-info/RECORD +328 -0
- pymoo-0.6.1.5.dev0.dist-info/WHEEL +6 -0
- pymoo-0.6.1.5.dev0.dist-info/licenses/LICENSE +191 -0
- pymoo-0.6.1.5.dev0.dist-info/top_level.txt +1 -0
pymoo/operators/sampling/lhs.py
@@ -0,0 +1,73 @@
+import numpy as np
+
+from pymoo.core.sampling import Sampling
+from pymoo.util.misc import cdist
+
+
+def criterion_maxmin(X):
+    D = cdist(X, X)
+    np.fill_diagonal(D, np.inf)
+    return np.min(D)
+
+
+def criterion_corr(X):
+    M = np.corrcoef(X.T, rowvar=True)
+    return -np.sum(np.tril(M, -1) ** 2)
+
+
+def sampling_lhs(n_samples, n_var, xl=0, xu=1, smooth=True, criterion=criterion_maxmin, n_iter=50):
+
+    X = sampling_lhs_unit(n_samples, n_var, smooth=smooth)
+
+    # if a criterion is selected to further improve the sampling
+    if criterion is not None:
+
+        # current best score is stored here
+        score = criterion(X)
+
+        for j in range(1, n_iter):
+
+            # create new random sample and check the score again
+            _X = sampling_lhs_unit(n_samples, n_var, smooth=smooth)
+            _score = criterion(_X)
+
+            if _score > score:
+                X, score = _X, _score
+
+    return xl + X * (xu - xl)
+
+
+def sampling_lhs_unit(n_samples, n_var, smooth=True):
+    X = np.random.random(size=(n_samples, n_var))
+    Xp = X.argsort(axis=0) + 1
+
+    if smooth:
+        Xp = Xp - np.random.random(Xp.shape)
+    else:
+        Xp = Xp - 0.5
+    Xp /= n_samples
+    return Xp
+
+
+class LatinHypercubeSampling(Sampling):
+
+    def __init__(self,
+                 smooth=True,
+                 iterations=20,
+                 criterion=criterion_maxmin) -> None:
+        super().__init__()
+        self.smooth = smooth
+        self.iterations = iterations
+        self.criterion = criterion
+
+    def _do(self, problem, n_samples, **kwargs):
+        xl, xu = problem.bounds()
+
+        X = sampling_lhs(n_samples, problem.n_var, xl=xl, xu=xu, smooth=self.smooth,
+                         criterion=self.criterion, n_iter=self.iterations)
+
+        return X
+
+
+class LHS(LatinHypercubeSampling):
+    pass
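For orientation, a minimal sketch (not part of the diff) of how the helpers above could be called directly; the 20×3 shape and the [-5, 5] bounds are arbitrary example values.

from pymoo.operators.sampling.lhs import sampling_lhs, criterion_maxmin

# draw 20 Latin-hypercube samples in 3 variables, scaled to the example bounds [-5, 5]
X = sampling_lhs(20, 3, xl=-5, xu=5, smooth=True,
                 criterion=criterion_maxmin, n_iter=50)

assert X.shape == (20, 3)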
pymoo/operators/sampling/rnd.py
@@ -0,0 +1,50 @@
+import numpy as np
+
+from pymoo.core.sampling import Sampling
+
+
+def random(problem, n_samples=1):
+    X = np.random.random((n_samples, problem.n_var))
+
+    if problem.has_bounds():
+        xl, xu = problem.bounds()
+        assert np.all(xu >= xl)
+        X = xl + (xu - xl) * X
+
+    return X
+
+
+class FloatRandomSampling(Sampling):
+
+    def _do(self, problem, n_samples, **kwargs):
+        X = np.random.random((n_samples, problem.n_var))
+
+        if problem.has_bounds():
+            xl, xu = problem.bounds()
+            assert np.all(xu >= xl)
+            X = xl + (xu - xl) * X
+
+        return X
+
+
+class BinaryRandomSampling(Sampling):
+
+    def _do(self, problem, n_samples, **kwargs):
+        val = np.random.random((n_samples, problem.n_var))
+        return (val < 0.5).astype(bool)
+
+
+class IntegerRandomSampling(FloatRandomSampling):
+
+    def _do(self, problem, n_samples, **kwargs):
+        n, (xl, xu) = problem.n_var, problem.bounds()
+        return np.column_stack([np.random.randint(xl[k], xu[k] + 1, size=n_samples) for k in range(n)])
+
+
+class PermutationRandomSampling(Sampling):
+
+    def _do(self, problem, n_samples, **kwargs):
+        X = np.full((n_samples, problem.n_var), 0, dtype=int)
+        for i in range(n_samples):
+            X[i, :] = np.random.permutation(problem.n_var)
+        return X
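As a rough usage sketch (an assumption, not taken from the diff), the module-level random() helper can be pointed at any bounded pymoo problem; "sphere" is only an example name resolved through pymoo's problem factory.

from pymoo.problems import get_problem
from pymoo.operators.sampling.rnd import random

problem = get_problem("sphere")      # example problem with box bounds
X = random(problem, n_samples=5)     # shape (5, problem.n_var), scaled into the bounds

print(X.shape)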
pymoo/operators/selection/__init__.py
File without changes
pymoo/operators/selection/rnd.py
@@ -0,0 +1,72 @@
+import math
+
+import numpy as np
+
+from pymoo.core.selection import Selection
+from pymoo.util.misc import random_permutations
+
+
+class RandomSelection(Selection):
+
+    def _do(self, _, pop, n_select, n_parents, **kwargs):
+        # number of random individuals needed
+        n_random = n_select * n_parents
+
+        # number of permutations needed
+        n_perms = math.ceil(n_random / len(pop))
+
+        # get random permutations and reshape them
+        P = random_permutations(n_perms, len(pop))[:n_random]
+
+        return np.reshape(P, (n_select, n_parents))
+
+
+def fast_fill_random(X, N, columns=None, Xp=None, n_max_attempts=10):
+    """
+
+    Parameters
+    ----------
+    X : np.ndarray
+        The actually array to fill with random values.
+    N : int
+        The upper limit for the values. The values will be in range (0, ..., N)
+    columns : list
+        The columns which should be filled randomly. Other columns indicate duplicates
+    Xp : np.ndarray
+        If some other duplicates shall be avoided by default
+
+    """
+
+    _, n_cols = X.shape
+
+    if columns is None:
+        columns = range(n_cols)
+
+    # all columns set so far to be checked for duplicates
+    J = []
+
+    # for each of the columns which should be set to be no duplicates
+    for col in columns:
+
+        D = X[:, J]
+        if Xp is not None:
+            D = np.column_stack([D, Xp])
+
+        # all the remaining indices that need to be filled with no duplicates
+        rem = np.arange(len(X))
+
+        for _ in range(n_max_attempts):
+
+            if len(rem) > N:
+                X[rem, col] = np.random.choice(N, replace=True, size=len(rem))
+            else:
+                X[rem, col] = np.random.permutation(N)[:len(rem)]
+
+            rem = np.where((X[rem, col][:, None] == D[rem]).any(axis=1))[0]
+
+            if len(rem) == 0:
+                break
+
+        J.append(col)
+
+    return X
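A hedged sketch of fast_fill_random from the file above: the (5, 3) shape and N=10 are made-up values, and the helper fills each listed column with indices from 0..N-1 while retrying rows whose new value repeats an already-filled column of the same row.

import numpy as np

from pymoo.operators.selection.rnd import fast_fill_random

X = np.zeros((5, 3), dtype=int)
X = fast_fill_random(X, N=10, columns=[0, 1, 2])

print(X)   # per row, the three columns avoid repeating each other where possible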
pymoo/operators/selection/tournament.py
@@ -0,0 +1,76 @@
+import math
+
+import numpy as np
+
+from pymoo.core.selection import Selection
+from pymoo.util.misc import random_permutations
+
+
+class TournamentSelection(Selection):
+    """
+    The Tournament selection is used to simulate a tournament between individuals. The pressure balances
+    greedy the genetic algorithm will be.
+    """
+
+    def __init__(self, func_comp=None, pressure=2, **kwargs):
+        """
+
+        Parameters
+        ----------
+        func_comp: func
+            The function to compare two individuals. It has the shape: comp(pop, indices) and returns the winner.
+            If the function is None it is assumed the population is sorted by a criterion and only indices are compared.
+
+        pressure: int
+            The selection pressure to be applied. Default it is a binary tournament.
+        """
+
+        super().__init__(**kwargs)
+
+        # selection pressure to be applied
+        self.pressure = pressure
+
+        self.func_comp = func_comp
+        if self.func_comp is None:
+            raise Exception("Please provide the comparing function for the tournament selection!")
+
+    def _do(self, _, pop, n_select, n_parents=1, **kwargs):
+        # number of random individuals needed
+        n_random = n_select * n_parents * self.pressure
+
+        # number of permutations needed
+        n_perms = math.ceil(n_random / len(pop))
+
+        # get random permutations and reshape them
+        P = random_permutations(n_perms, len(pop))[:n_random]
+        P = np.reshape(P, (n_select * n_parents, self.pressure))
+
+        # compare using tournament function
+        S = self.func_comp(pop, P, **kwargs)
+
+        return np.reshape(S, (n_select, n_parents))
+
+
+def compare(a, a_val, b, b_val, method, return_random_if_equal=False):
+    if method == 'larger_is_better':
+        if a_val > b_val:
+            return a
+        elif a_val < b_val:
+            return b
+        else:
+            if return_random_if_equal:
+                return np.random.choice([a, b])
+            else:
+                return None
+    elif method == 'smaller_is_better':
+        if a_val < b_val:
+            return a
+        elif a_val > b_val:
+            return b
+        else:
+            if return_random_if_equal:
+                return np.random.choice([a, b])
+            else:
+                return None
+    else:
+        raise Exception("Unknown method.")
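TournamentSelection requires a comparator passed as func_comp. The sketch below is an assumption about a typical comparator rather than code from the package: it receives the population and an index matrix P of shape (n, pressure) and returns one winning index per row, here judged by the first objective value with smaller being better.

import numpy as np

from pymoo.operators.selection.tournament import TournamentSelection, compare


def binary_tournament_by_f(pop, P, **kwargs):
    # one winner per row of P (shape: n matches x pressure)
    S = np.full(P.shape[0], -1, dtype=int)
    for i, (a, b) in enumerate(P):
        S[i] = compare(a, pop[a].F[0], b, pop[b].F[0],
                       method='smaller_is_better', return_random_if_equal=True)
    return S


selection = TournamentSelection(func_comp=binary_tournament_by_f, pressure=2)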
pymoo/operators/survival/__init__.py
File without changes
pymoo/operators/survival/rank_and_crowding/__init__.py
@@ -0,0 +1 @@
+from pymoo.operators.survival.rank_and_crowding.classes import RankAndCrowding, ConstrRankAndCrowding
pymoo/operators/survival/rank_and_crowding/classes.py
@@ -0,0 +1,209 @@
+import numpy as np
+from pymoo.util.randomized_argsort import randomized_argsort
+from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
+from pymoo.core.survival import Survival, split_by_feasibility
+from pymoo.core.population import Population
+from pymoo.operators.survival.rank_and_crowding.metrics import get_crowding_function
+
+
+class RankAndCrowding(Survival):
+
+    def __init__(self, nds=None, crowding_func="cd"):
+        """
+        A generalization of the NSGA-II survival operator that ranks individuals by dominance criteria
+        and sorts the last front by some user-specified crowding metric. The default is NSGA-II's crowding distances
+        although others might be more effective.
+
+        For many-objective problems, try using 'mnn' or '2nn'.
+
+        For Bi-objective problems, 'pcd' is very effective.
+
+        Parameters
+        ----------
+        nds : str or None, optional
+            Pymoo type of non-dominated sorting. Defaults to None.
+
+        crowding_func : str or callable, optional
+            Crowding metric. Options are:
+
+                - 'cd': crowding distances
+                - 'pcd' or 'pruning-cd': improved pruning based on crowding distances
+                - 'ce': crowding entropy
+                - 'mnn': M-Neaest Neighbors
+                - '2nn': 2-Neaest Neighbors
+
+            If callable, it has the form ``fun(F, filter_out_duplicates=None, n_remove=None, **kwargs)``
+            in which F (n, m) and must return metrics in a (n,) array.
+
+            The options 'pcd', 'cd', and 'ce' are recommended for two-objective problems, whereas 'mnn' and '2nn' for many objective.
+            When using 'pcd', 'mnn', or '2nn', individuals are already eliminated in a 'single' manner.
+            Due to Cython implementation, they are as fast as the corresponding 'cd', 'mnn-fast', or '2nn-fast',
+            although they can singnificantly improve diversity of solutions.
+            Defaults to 'cd'.
+        """
+
+        crowding_func_ = get_crowding_function(crowding_func)
+
+        super().__init__(filter_infeasible=True)
+        self.nds = nds if nds is not None else NonDominatedSorting()
+        self.crowding_func = crowding_func_
+
+    def _do(self,
+            problem,
+            pop,
+            *args,
+            n_survive=None,
+            **kwargs):
+
+        # get the objective space values and objects
+        F = pop.get("F").astype(float, copy=False)
+
+        # the final indices of surviving individuals
+        survivors = []
+
+        # do the non-dominated sorting until splitting front
+        fronts = self.nds.do(F, n_stop_if_ranked=n_survive)
+
+        for k, front in enumerate(fronts):
+
+            I = np.arange(len(front))
+
+            # current front sorted by crowding distance if splitting
+            if len(survivors) + len(I) > n_survive:
+
+                # Define how many will be removed
+                n_remove = len(survivors) + len(front) - n_survive
+
+                # re-calculate the crowding distance of the front
+                crowding_of_front = \
+                    self.crowding_func.do(
+                        F[front, :],
+                        n_remove=n_remove
+                    )
+
+                I = randomized_argsort(crowding_of_front, order='descending', method='numpy')
+                I = I[:-n_remove]
+
+            # otherwise take the whole front unsorted
+            else:
+                # calculate the crowding distance of the front
+                crowding_of_front = \
+                    self.crowding_func.do(
+                        F[front, :],
+                        n_remove=0
+                    )
+
+            # save rank and crowding in the individual class
+            for j, i in enumerate(front):
+                pop[i].set("rank", k)
+                pop[i].set("crowding", crowding_of_front[j])
+
+            # extend the survivors by all or selected individuals
+            survivors.extend(front[I])
+
+        return pop[survivors]
+
+
+class ConstrRankAndCrowding(Survival):
+
+    def __init__(self, nds=None, crowding_func="cd"):
+        """
+        The Rank and Crowding survival approach for handling constraints proposed on
+        GDE3 by Kukkonen, S. & Lampinen, J. (2005).
+
+        Parameters
+        ----------
+        nds : str or None, optional
+            Pymoo type of non-dominated sorting. Defaults to None.
+
+        crowding_func : str or callable, optional
+            Crowding metric. Options are:
+
+                - 'cd': crowding distances
+                - 'pcd' or 'pruning-cd': improved pruning based on crowding distances
+                - 'ce': crowding entropy
+                - 'mnn': M-Neaest Neighbors
+                - '2nn': 2-Neaest Neighbors
+
+            If callable, it has the form ``fun(F, filter_out_duplicates=None, n_remove=None, **kwargs)``
+            in which F (n, m) and must return metrics in a (n,) array.
+
+            The options 'pcd', 'cd', and 'ce' are recommended for two-objective problems, whereas 'mnn' and '2nn' for many objective.
+            When using 'pcd', 'mnn', or '2nn', individuals are already eliminated in a 'single' manner.
+            Due to Cython implementation, they are as fast as the corresponding 'cd', 'mnn-fast', or '2nn-fast',
+            although they can singnificantly improve diversity of solutions.
+            Defaults to 'cd'.
+        """
+
+        super().__init__(filter_infeasible=False)
+        self.nds = nds if nds is not None else NonDominatedSorting()
+        self.ranking = RankAndCrowding(nds=nds, crowding_func=crowding_func)
+
+    def _do(self,
+            problem,
+            pop,
+            *args,
+            n_survive=None,
+            **kwargs):
+
+        if n_survive is None:
+            n_survive = len(pop)
+
+        n_survive = min(n_survive, len(pop))
+
+        # If the split should be done beforehand
+        if problem.n_constr > 0:
+
+            # Split by feasibility
+            feas, infeas = split_by_feasibility(pop, sort_infeas_by_cv=True, sort_feas_by_obj=False, return_pop=False)
+
+            # Obtain len of feasible
+            n_feas = len(feas)
+
+            # Assure there is at least_one survivor
+            if n_feas == 0:
+                survivors = Population()
+            else:
+                survivors = self.ranking.do(problem, pop[feas], *args, n_survive=min(len(feas), n_survive), **kwargs)
+
+            # Calculate how many individuals are still remaining to be filled up with infeasible ones
+            n_remaining = n_survive - len(survivors)
+
+            # If infeasible solutions need to be added
+            if n_remaining > 0:
+
+                # Constraints to new ranking
+                G = pop[infeas].get("G")
+                G = np.maximum(G, 0)
+                H = pop[infeas].get("H")
+                H = np.absolute(H)
+                C = np.column_stack((G, H))
+
+                # Fronts in infeasible population
+                infeas_fronts = self.nds.do(C, n_stop_if_ranked=n_remaining)
+
+                # Iterate over fronts
+                for k, front in enumerate(infeas_fronts):
+
+                    # Save ranks
+                    pop[infeas][front].set("cv_rank", k)
+
+                    # Current front sorted by CV
+                    if len(survivors) + len(front) > n_survive:
+
+                        # Obtain CV of front
+                        CV = pop[infeas][front].get("CV").flatten()
+                        I = randomized_argsort(CV, order='ascending', method='numpy')
+                        I = I[:(n_survive - len(survivors))]
+
+                    # Otherwise take the whole front unsorted
+                    else:
+                        I = np.arange(len(front))
+
+                    # extend the survivors by all or selected individuals
+                    survivors = Population.merge(survivors, pop[infeas][front[I]])
+
+        else:
+            survivors = self.ranking.do(problem, pop, *args, n_survive=n_survive, **kwargs)
+
+        return survivors
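A hedged example of wiring RankAndCrowding with a non-default crowding metric into NSGA2; 'pcd' is one of the options documented above, while "zdt1", the population size, and the termination tuple are illustrative choices only.

from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.operators.survival.rank_and_crowding import RankAndCrowding
from pymoo.optimize import minimize
from pymoo.problems import get_problem

problem = get_problem("zdt1")
algorithm = NSGA2(pop_size=100, survival=RankAndCrowding(crowding_func="pcd"))

res = minimize(problem, algorithm, ('n_gen', 100), seed=1, verbose=False)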
pymoo/operators/survival/rank_and_crowding/metrics.py
@@ -0,0 +1,208 @@
+import numpy as np
+from scipy.spatial.distance import pdist, squareform
+from pymoo.util.misc import find_duplicates
+from pymoo.util.function_loader import load_function
+
+
+def get_crowding_function(label):
+
+    if label == "cd":
+        fun = FunctionalDiversity(calc_crowding_distance, filter_out_duplicates=False)
+    elif (label == "pcd") or (label == "pruning-cd"):
+        fun = FunctionalDiversity(load_function("calc_pcd"), filter_out_duplicates=True)
+    elif label == "ce":
+        fun = FunctionalDiversity(calc_crowding_entropy, filter_out_duplicates=True)
+    elif label == "mnn":
+        fun = FuncionalDiversityMNN(load_function("calc_mnn"), filter_out_duplicates=True)
+    elif label == "2nn":
+        fun = FuncionalDiversityMNN(load_function("calc_2nn"), filter_out_duplicates=True)
+    elif hasattr(label, "__call__"):
+        fun = FunctionalDiversity(label, filter_out_duplicates=True)
+    elif isinstance(label, CrowdingDiversity):
+        fun = label
+    else:
+        raise KeyError("Crowding function not defined")
+    return fun
+
+
+class CrowdingDiversity:
+
+    def do(self, F, n_remove=0):
+        # Converting types Python int to Cython int would fail in some cases converting to long instead
+        n_remove = np.intc(n_remove)
+        F = np.array(F, dtype=np.double)
+        return self._do(F, n_remove=n_remove)
+
+    def _do(self, F, n_remove=None):
+        pass
+
+
+class FunctionalDiversity(CrowdingDiversity):
+
+    def __init__(self, function=None, filter_out_duplicates=True):
+        self.function = function
+        self.filter_out_duplicates = filter_out_duplicates
+        super().__init__()
+
+    def _do(self, F, **kwargs):
+
+        n_points, n_obj = F.shape
+
+        if n_points <= 2:
+            return np.full(n_points, np.inf)
+
+        else:
+
+            if self.filter_out_duplicates:
+                # filter out solutions which are duplicates - duplicates get a zero finally
+                is_unique = np.where(np.logical_not(find_duplicates(F, epsilon=1e-32)))[0]
+            else:
+                # set every point to be unique without checking it
+                is_unique = np.arange(n_points)
+
+            # index the unique points of the array
+            _F = F[is_unique]
+
+            _d = self.function(_F, **kwargs)
+
+            d = np.zeros(n_points)
+            d[is_unique] = _d
+
+            return d
+
+
+class FuncionalDiversityMNN(FunctionalDiversity):
+
+    def _do(self, F, **kwargs):
+
+        n_points, n_obj = F.shape
+
+        if n_points <= n_obj:
+            return np.full(n_points, np.inf)
+
+        else:
+            return super()._do(F, **kwargs)
+
+
+def calc_crowding_distance(F, **kwargs):
+    n_points, n_obj = F.shape
+
+    # sort each column and get index
+    I = np.argsort(F, axis=0, kind='mergesort')
+
+    # sort the objective space values for the whole matrix
+    F = F[I, np.arange(n_obj)]
+
+    # calculate the distance from each point to the last and next
+    dist = np.row_stack([F, np.full(n_obj, np.inf)]) - np.row_stack([np.full(n_obj, -np.inf), F])
+
+    # calculate the norm for each objective - set to NaN if all values are equal
+    norm = np.max(F, axis=0) - np.min(F, axis=0)
+    norm[norm == 0] = np.nan
+
+    # prepare the distance to last and next vectors
+    dist_to_last, dist_to_next = dist, np.copy(dist)
+    dist_to_last, dist_to_next = dist_to_last[:-1] / norm, dist_to_next[1:] / norm
+
+    # if we divide by zero because all values in one columns are equal replace by none
+    dist_to_last[np.isnan(dist_to_last)] = 0.0
+    dist_to_next[np.isnan(dist_to_next)] = 0.0
+
+    # sum up the distance to next and last and norm by objectives - also reorder from sorted list
+    J = np.argsort(I, axis=0)
+    cd = np.sum(dist_to_last[J, np.arange(n_obj)] + dist_to_next[J, np.arange(n_obj)], axis=1) / n_obj
+
+    return cd
+
+
+def calc_crowding_entropy(F, **kwargs):
+    """Wang, Y.-N., Wu, L.-H. & Yuan, X.-F., 2010. Multi-objective self-adaptive differential
+    evolution with elitist archive and crowding entropy-based diversity measure.
+    Soft Comput., 14(3), pp. 193-209.
+
+    Parameters
+    ----------
+    F : 2d array like
+        Objective functions.
+
+    Returns
+    -------
+    ce : 1d array
+        Crowding Entropies
+    """
+    n_points, n_obj = F.shape
+
+    # sort each column and get index
+    I = np.argsort(F, axis=0, kind='mergesort')
+
+    # sort the objective space values for the whole matrix
+    F = F[I, np.arange(n_obj)]
+
+    # calculate the distance from each point to the last and next
+    dist = np.row_stack([F, np.full(n_obj, np.inf)]) - np.row_stack([np.full(n_obj, -np.inf), F])
+
+    # calculate the norm for each objective - set to NaN if all values are equal
+    norm = np.max(F, axis=0) - np.min(F, axis=0)
+    norm[norm == 0] = np.nan
+
+    # prepare the distance to last and next vectors
+    dl = dist.copy()[:-1]
+    du = dist.copy()[1:]
+
+    # Fix nan
+    dl[np.isnan(dl)] = 0.0
+    du[np.isnan(du)] = 0.0
+
+    # Total distance
+    cd = dl + du
+
+    # Get relative positions
+    pl = (dl[1:-1] / cd[1:-1])
+    pu = (du[1:-1] / cd[1:-1])
+
+    # Entropy
+    entropy = np.row_stack([np.full(n_obj, np.inf),
+                            -(pl * np.log2(pl) + pu * np.log2(pu)),
+                            np.full(n_obj, np.inf)])
+
+    # Crowding entropy
+    J = np.argsort(I, axis=0)
+    _cej = cd[J, np.arange(n_obj)] * entropy[J, np.arange(n_obj)] / norm
+    _cej[np.isnan(_cej)] = 0.0
+    ce = _cej.sum(axis=1)
+
+    return ce
+
+
+def calc_mnn_fast(F, **kwargs):
+    return _calc_mnn_fast(F, F.shape[1], **kwargs)
+
+
+def calc_2nn_fast(F, **kwargs):
+    return _calc_mnn_fast(F, 2, **kwargs)
+
+
+def _calc_mnn_fast(F, n_neighbors, **kwargs):
+
+    # calculate the norm for each objective - set to NaN if all values are equal
+    norm = np.max(F, axis=0) - np.min(F, axis=0)
+    norm[norm == 0] = 1.0
+
+    # F normalized
+    F = (F - F.min(axis=0)) / norm
+
+    # Distances pairwise (Inefficient)
+    D = squareform(pdist(F, metric="sqeuclidean"))
+
+    # M neighbors
+    M = F.shape[1]
+    _D = np.partition(D, range(1, M+1), axis=1)[:, 1:M+1]
+
+    # Metric d
+    d = np.prod(_D, axis=1)
+
+    # Set top performers as np.inf
+    _extremes = np.concatenate((np.argmin(F, axis=0), np.argmax(F, axis=0)))
+    d[_extremes] = np.inf
+
+    return d
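A small standalone check (not part of the package) of calc_crowding_distance on a hand-made bi-objective front: boundary points receive inf, while interior points get the average over objectives of their normalized gaps to the sorted neighbours.

import numpy as np

from pymoo.operators.survival.rank_and_crowding.metrics import calc_crowding_distance

F = np.array([[0.0, 1.0],
              [0.2, 0.7],
              [0.6, 0.3],
              [1.0, 0.0]])

cd = calc_crowding_distance(F)
print(cd)   # -> [inf, 0.65, 0.75, inf] for this front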