pymoo 0.6.1.6__cp312-cp312-macosx_10_13_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pymoo/__init__.py +3 -0
- pymoo/algorithms/__init__.py +0 -0
- pymoo/algorithms/base/__init__.py +0 -0
- pymoo/algorithms/base/bracket.py +38 -0
- pymoo/algorithms/base/genetic.py +110 -0
- pymoo/algorithms/base/line.py +62 -0
- pymoo/algorithms/base/local.py +39 -0
- pymoo/algorithms/base/meta.py +79 -0
- pymoo/algorithms/hyperparameters.py +91 -0
- pymoo/algorithms/moo/__init__.py +0 -0
- pymoo/algorithms/moo/age.py +310 -0
- pymoo/algorithms/moo/age2.py +194 -0
- pymoo/algorithms/moo/cmopso.py +239 -0
- pymoo/algorithms/moo/ctaea.py +305 -0
- pymoo/algorithms/moo/dnsga2.py +80 -0
- pymoo/algorithms/moo/kgb.py +450 -0
- pymoo/algorithms/moo/moead.py +183 -0
- pymoo/algorithms/moo/mopso_cd.py +309 -0
- pymoo/algorithms/moo/nsga2.py +113 -0
- pymoo/algorithms/moo/nsga3.py +361 -0
- pymoo/algorithms/moo/pinsga2.py +370 -0
- pymoo/algorithms/moo/rnsga2.py +188 -0
- pymoo/algorithms/moo/rnsga3.py +246 -0
- pymoo/algorithms/moo/rvea.py +214 -0
- pymoo/algorithms/moo/sms.py +196 -0
- pymoo/algorithms/moo/spea2.py +191 -0
- pymoo/algorithms/moo/unsga3.py +49 -0
- pymoo/algorithms/soo/__init__.py +0 -0
- pymoo/algorithms/soo/convex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/__init__.py +0 -0
- pymoo/algorithms/soo/nonconvex/brkga.py +162 -0
- pymoo/algorithms/soo/nonconvex/cmaes.py +556 -0
- pymoo/algorithms/soo/nonconvex/de.py +283 -0
- pymoo/algorithms/soo/nonconvex/direct.py +148 -0
- pymoo/algorithms/soo/nonconvex/es.py +213 -0
- pymoo/algorithms/soo/nonconvex/g3pcx.py +94 -0
- pymoo/algorithms/soo/nonconvex/ga.py +95 -0
- pymoo/algorithms/soo/nonconvex/ga_niching.py +223 -0
- pymoo/algorithms/soo/nonconvex/isres.py +74 -0
- pymoo/algorithms/soo/nonconvex/nelder.py +251 -0
- pymoo/algorithms/soo/nonconvex/nrbo.py +191 -0
- pymoo/algorithms/soo/nonconvex/optuna.py +80 -0
- pymoo/algorithms/soo/nonconvex/pattern.py +185 -0
- pymoo/algorithms/soo/nonconvex/pso.py +337 -0
- pymoo/algorithms/soo/nonconvex/pso_ep.py +307 -0
- pymoo/algorithms/soo/nonconvex/random_search.py +25 -0
- pymoo/algorithms/soo/nonconvex/sres.py +56 -0
- pymoo/algorithms/soo/univariate/__init__.py +0 -0
- pymoo/algorithms/soo/univariate/exp.py +46 -0
- pymoo/algorithms/soo/univariate/golden.py +65 -0
- pymoo/algorithms/soo/univariate/quadr_interp.py +81 -0
- pymoo/algorithms/soo/univariate/wolfe.py +163 -0
- pymoo/config.py +33 -0
- pymoo/constraints/__init__.py +3 -0
- pymoo/constraints/adaptive.py +66 -0
- pymoo/constraints/as_obj.py +56 -0
- pymoo/constraints/as_penalty.py +41 -0
- pymoo/constraints/eps.py +34 -0
- pymoo/constraints/from_bounds.py +36 -0
- pymoo/core/__init__.py +0 -0
- pymoo/core/algorithm.py +408 -0
- pymoo/core/callback.py +38 -0
- pymoo/core/crossover.py +79 -0
- pymoo/core/decision_making.py +102 -0
- pymoo/core/decomposition.py +76 -0
- pymoo/core/duplicate.py +163 -0
- pymoo/core/evaluator.py +116 -0
- pymoo/core/indicator.py +34 -0
- pymoo/core/individual.py +784 -0
- pymoo/core/infill.py +65 -0
- pymoo/core/initialization.py +44 -0
- pymoo/core/mating.py +39 -0
- pymoo/core/meta.py +21 -0
- pymoo/core/mixed.py +164 -0
- pymoo/core/mutation.py +44 -0
- pymoo/core/operator.py +46 -0
- pymoo/core/parameters.py +134 -0
- pymoo/core/plot.py +208 -0
- pymoo/core/population.py +180 -0
- pymoo/core/problem.py +373 -0
- pymoo/core/recorder.py +99 -0
- pymoo/core/repair.py +23 -0
- pymoo/core/replacement.py +96 -0
- pymoo/core/result.py +52 -0
- pymoo/core/sampling.py +45 -0
- pymoo/core/selection.py +61 -0
- pymoo/core/solution.py +10 -0
- pymoo/core/survival.py +107 -0
- pymoo/core/termination.py +70 -0
- pymoo/core/variable.py +415 -0
- pymoo/decomposition/__init__.py +0 -0
- pymoo/decomposition/aasf.py +24 -0
- pymoo/decomposition/asf.py +10 -0
- pymoo/decomposition/pbi.py +13 -0
- pymoo/decomposition/perp_dist.py +13 -0
- pymoo/decomposition/tchebicheff.py +11 -0
- pymoo/decomposition/util.py +13 -0
- pymoo/decomposition/weighted_sum.py +8 -0
- pymoo/docs.py +187 -0
- pymoo/experimental/__init__.py +0 -0
- pymoo/experimental/algorithms/__init__.py +0 -0
- pymoo/experimental/algorithms/gde3.py +57 -0
- pymoo/functions/__init__.py +135 -0
- pymoo/functions/compiled/__init__.py +0 -0
- pymoo/functions/compiled/calc_perpendicular_distance.cpp +27464 -0
- pymoo/functions/compiled/calc_perpendicular_distance.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/decomposition.cpp +28853 -0
- pymoo/functions/compiled/decomposition.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/info.cpp +7058 -0
- pymoo/functions/compiled/info.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/mnn.cpp +30095 -0
- pymoo/functions/compiled/mnn.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/non_dominated_sorting.cpp +35692 -0
- pymoo/functions/compiled/non_dominated_sorting.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/pruning_cd.cpp +29248 -0
- pymoo/functions/compiled/pruning_cd.cpython-312-darwin.so +0 -0
- pymoo/functions/compiled/stochastic_ranking.cpp +28042 -0
- pymoo/functions/compiled/stochastic_ranking.cpython-312-darwin.so +0 -0
- pymoo/functions/standard/__init__.py +1 -0
- pymoo/functions/standard/calc_perpendicular_distance.py +20 -0
- pymoo/functions/standard/decomposition.py +18 -0
- pymoo/functions/standard/hv.py +5 -0
- pymoo/functions/standard/mnn.py +78 -0
- pymoo/functions/standard/non_dominated_sorting.py +474 -0
- pymoo/functions/standard/pruning_cd.py +93 -0
- pymoo/functions/standard/stochastic_ranking.py +42 -0
- pymoo/gradient/__init__.py +24 -0
- pymoo/gradient/automatic.py +85 -0
- pymoo/gradient/grad_autograd.py +105 -0
- pymoo/gradient/grad_complex.py +35 -0
- pymoo/gradient/grad_jax.py +51 -0
- pymoo/gradient/numpy.py +22 -0
- pymoo/gradient/toolbox/__init__.py +19 -0
- pymoo/indicators/__init__.py +0 -0
- pymoo/indicators/distance_indicator.py +55 -0
- pymoo/indicators/gd.py +7 -0
- pymoo/indicators/gd_plus.py +7 -0
- pymoo/indicators/hv/__init__.py +59 -0
- pymoo/indicators/hv/approximate.py +105 -0
- pymoo/indicators/hv/exact.py +68 -0
- pymoo/indicators/hv/exact_2d.py +102 -0
- pymoo/indicators/igd.py +7 -0
- pymoo/indicators/igd_plus.py +7 -0
- pymoo/indicators/kktpm.py +151 -0
- pymoo/indicators/migd.py +55 -0
- pymoo/indicators/rmetric.py +203 -0
- pymoo/indicators/spacing.py +52 -0
- pymoo/mcdm/__init__.py +0 -0
- pymoo/mcdm/compromise_programming.py +19 -0
- pymoo/mcdm/high_tradeoff.py +40 -0
- pymoo/mcdm/pseudo_weights.py +32 -0
- pymoo/operators/__init__.py +0 -0
- pymoo/operators/control.py +190 -0
- pymoo/operators/crossover/__init__.py +0 -0
- pymoo/operators/crossover/binx.py +47 -0
- pymoo/operators/crossover/dex.py +125 -0
- pymoo/operators/crossover/erx.py +164 -0
- pymoo/operators/crossover/expx.py +53 -0
- pymoo/operators/crossover/hux.py +37 -0
- pymoo/operators/crossover/nox.py +25 -0
- pymoo/operators/crossover/ox.py +88 -0
- pymoo/operators/crossover/pcx.py +84 -0
- pymoo/operators/crossover/pntx.py +49 -0
- pymoo/operators/crossover/sbx.py +137 -0
- pymoo/operators/crossover/spx.py +5 -0
- pymoo/operators/crossover/ux.py +20 -0
- pymoo/operators/mutation/__init__.py +0 -0
- pymoo/operators/mutation/bitflip.py +17 -0
- pymoo/operators/mutation/gauss.py +60 -0
- pymoo/operators/mutation/inversion.py +42 -0
- pymoo/operators/mutation/nom.py +7 -0
- pymoo/operators/mutation/pm.py +96 -0
- pymoo/operators/mutation/rm.py +23 -0
- pymoo/operators/repair/__init__.py +0 -0
- pymoo/operators/repair/bounce_back.py +32 -0
- pymoo/operators/repair/bounds_repair.py +97 -0
- pymoo/operators/repair/inverse_penalty.py +91 -0
- pymoo/operators/repair/rounding.py +18 -0
- pymoo/operators/repair/to_bound.py +31 -0
- pymoo/operators/repair/vtype.py +11 -0
- pymoo/operators/sampling/__init__.py +0 -0
- pymoo/operators/sampling/lhs.py +76 -0
- pymoo/operators/sampling/rnd.py +52 -0
- pymoo/operators/selection/__init__.py +0 -0
- pymoo/operators/selection/rnd.py +75 -0
- pymoo/operators/selection/tournament.py +78 -0
- pymoo/operators/survival/__init__.py +0 -0
- pymoo/operators/survival/rank_and_crowding/__init__.py +1 -0
- pymoo/operators/survival/rank_and_crowding/classes.py +212 -0
- pymoo/operators/survival/rank_and_crowding/metrics.py +208 -0
- pymoo/optimize.py +72 -0
- pymoo/parallelization/__init__.py +15 -0
- pymoo/parallelization/dask.py +25 -0
- pymoo/parallelization/joblib.py +28 -0
- pymoo/parallelization/ray.py +31 -0
- pymoo/parallelization/starmap.py +24 -0
- pymoo/problems/__init__.py +157 -0
- pymoo/problems/dyn.py +47 -0
- pymoo/problems/dynamic/__init__.py +0 -0
- pymoo/problems/dynamic/cec2015.py +108 -0
- pymoo/problems/dynamic/df.py +451 -0
- pymoo/problems/dynamic/misc.py +167 -0
- pymoo/problems/functional.py +48 -0
- pymoo/problems/many/__init__.py +5 -0
- pymoo/problems/many/cdtlz.py +159 -0
- pymoo/problems/many/dcdtlz.py +88 -0
- pymoo/problems/many/dtlz.py +264 -0
- pymoo/problems/many/wfg.py +553 -0
- pymoo/problems/multi/__init__.py +14 -0
- pymoo/problems/multi/bnh.py +34 -0
- pymoo/problems/multi/carside.py +48 -0
- pymoo/problems/multi/clutch.py +104 -0
- pymoo/problems/multi/csi.py +55 -0
- pymoo/problems/multi/ctp.py +198 -0
- pymoo/problems/multi/dascmop.py +213 -0
- pymoo/problems/multi/kursawe.py +25 -0
- pymoo/problems/multi/modact.py +68 -0
- pymoo/problems/multi/mw.py +400 -0
- pymoo/problems/multi/omnitest.py +48 -0
- pymoo/problems/multi/osy.py +32 -0
- pymoo/problems/multi/srn.py +28 -0
- pymoo/problems/multi/sympart.py +94 -0
- pymoo/problems/multi/tnk.py +24 -0
- pymoo/problems/multi/truss2d.py +83 -0
- pymoo/problems/multi/welded_beam.py +41 -0
- pymoo/problems/multi/wrm.py +36 -0
- pymoo/problems/multi/zdt.py +151 -0
- pymoo/problems/multi_to_single.py +22 -0
- pymoo/problems/single/__init__.py +12 -0
- pymoo/problems/single/ackley.py +24 -0
- pymoo/problems/single/cantilevered_beam.py +34 -0
- pymoo/problems/single/flowshop_scheduling.py +113 -0
- pymoo/problems/single/g.py +874 -0
- pymoo/problems/single/griewank.py +18 -0
- pymoo/problems/single/himmelblau.py +15 -0
- pymoo/problems/single/knapsack.py +49 -0
- pymoo/problems/single/mopta08.py +26 -0
- pymoo/problems/single/multimodal.py +20 -0
- pymoo/problems/single/pressure_vessel.py +30 -0
- pymoo/problems/single/rastrigin.py +20 -0
- pymoo/problems/single/rosenbrock.py +22 -0
- pymoo/problems/single/schwefel.py +18 -0
- pymoo/problems/single/simple.py +13 -0
- pymoo/problems/single/sphere.py +19 -0
- pymoo/problems/single/traveling_salesman.py +79 -0
- pymoo/problems/single/zakharov.py +19 -0
- pymoo/problems/static.py +14 -0
- pymoo/problems/util.py +42 -0
- pymoo/problems/zero_to_one.py +27 -0
- pymoo/termination/__init__.py +23 -0
- pymoo/termination/collection.py +12 -0
- pymoo/termination/cv.py +48 -0
- pymoo/termination/default.py +45 -0
- pymoo/termination/delta.py +64 -0
- pymoo/termination/fmin.py +16 -0
- pymoo/termination/ftol.py +144 -0
- pymoo/termination/indicator.py +49 -0
- pymoo/termination/max_eval.py +14 -0
- pymoo/termination/max_gen.py +15 -0
- pymoo/termination/max_time.py +20 -0
- pymoo/termination/robust.py +34 -0
- pymoo/termination/xtol.py +33 -0
- pymoo/util/__init__.py +33 -0
- pymoo/util/archive.py +152 -0
- pymoo/util/cache.py +29 -0
- pymoo/util/clearing.py +82 -0
- pymoo/util/display/__init__.py +0 -0
- pymoo/util/display/column.py +52 -0
- pymoo/util/display/display.py +34 -0
- pymoo/util/display/multi.py +100 -0
- pymoo/util/display/output.py +53 -0
- pymoo/util/display/progress.py +54 -0
- pymoo/util/display/single.py +67 -0
- pymoo/util/dominator.py +67 -0
- pymoo/util/hv.py +21 -0
- pymoo/util/matlab_engine.py +39 -0
- pymoo/util/misc.py +447 -0
- pymoo/util/nds/__init__.py +0 -0
- pymoo/util/nds/dominance_degree_non_dominated_sort.py +159 -0
- pymoo/util/nds/efficient_non_dominated_sort.py +152 -0
- pymoo/util/nds/fast_non_dominated_sort.py +70 -0
- pymoo/util/nds/find_non_dominated.py +54 -0
- pymoo/util/nds/naive_non_dominated_sort.py +36 -0
- pymoo/util/nds/non_dominated_sorting.py +94 -0
- pymoo/util/nds/tree_based_non_dominated_sort.py +133 -0
- pymoo/util/normalization.py +312 -0
- pymoo/util/optimum.py +42 -0
- pymoo/util/randomized_argsort.py +63 -0
- pymoo/util/ref_dirs/__init__.py +24 -0
- pymoo/util/ref_dirs/construction.py +89 -0
- pymoo/util/ref_dirs/das_dennis.py +52 -0
- pymoo/util/ref_dirs/energy.py +317 -0
- pymoo/util/ref_dirs/energy_layer.py +119 -0
- pymoo/util/ref_dirs/genetic_algorithm.py +64 -0
- pymoo/util/ref_dirs/incremental.py +69 -0
- pymoo/util/ref_dirs/misc.py +128 -0
- pymoo/util/ref_dirs/optimizer.py +59 -0
- pymoo/util/ref_dirs/performance.py +162 -0
- pymoo/util/ref_dirs/reduction.py +85 -0
- pymoo/util/ref_dirs/sample_and_map.py +24 -0
- pymoo/util/reference_direction.py +258 -0
- pymoo/util/remote.py +55 -0
- pymoo/util/roulette.py +29 -0
- pymoo/util/running_metric.py +128 -0
- pymoo/util/sliding_window.py +25 -0
- pymoo/util/value_functions.py +720 -0
- pymoo/util/vectors.py +40 -0
- pymoo/util/vf_dominator.py +102 -0
- pymoo/vendor/__init__.py +0 -0
- pymoo/vendor/cec2018.py +398 -0
- pymoo/vendor/gta.py +617 -0
- pymoo/vendor/vendor_cmaes.py +421 -0
- pymoo/vendor/vendor_coco.py +81 -0
- pymoo/vendor/vendor_scipy.py +232 -0
- pymoo/version.py +1 -0
- pymoo/visualization/__init__.py +21 -0
- pymoo/visualization/app/__init__.py +0 -0
- pymoo/visualization/app/pso.py +61 -0
- pymoo/visualization/fitness_landscape.py +128 -0
- pymoo/visualization/heatmap.py +123 -0
- pymoo/visualization/matplotlib.py +61 -0
- pymoo/visualization/pcp.py +121 -0
- pymoo/visualization/petal.py +91 -0
- pymoo/visualization/radar.py +108 -0
- pymoo/visualization/radviz.py +68 -0
- pymoo/visualization/scatter.py +150 -0
- pymoo/visualization/star_coordinate.py +75 -0
- pymoo/visualization/util.py +296 -0
- pymoo/visualization/video/__init__.py +0 -0
- pymoo/visualization/video/callback_video.py +82 -0
- pymoo/visualization/video/one_var_one_obj.py +57 -0
- pymoo/visualization/video/two_var_one_obj.py +62 -0
- pymoo-0.6.1.6.dist-info/METADATA +209 -0
- pymoo-0.6.1.6.dist-info/RECORD +337 -0
- pymoo-0.6.1.6.dist-info/WHEEL +6 -0
- pymoo-0.6.1.6.dist-info/licenses/LICENSE +191 -0
- pymoo-0.6.1.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,102 @@
+import numpy as np
+
+from pymoo.indicators.hv.exact import ExactHypervolume
+
+
+def hvc_2d_slow(ref_point, F):
+    n = len(F)
+
+    I = np.lexsort((-F[:, 1], F[:, 0]))
+
+    V = np.vstack([ref_point, F[I], ref_point])
+
+    hvi = np.zeros(n)
+
+    for k in range(1, n + 1):
+        height = V[k - 1, 1] - V[k, 1]
+        width = V[k + 1, 0] - V[k, 0]
+
+        hvi[I[k - 1]] = width * height
+
+    return np.array(hvi)
+
+
+def hvc_2d_fast(ref_point, F_sorted, left=None, right=None):
+    if left is None:
+        left = [F_sorted[0, 0], ref_point[1]]
+
+    if right is None:
+        right = [ref_point[0], F_sorted[-1, 1]]
+
+    V = np.vstack([left, F_sorted, right])
+    height = (V[:-1, 1] - V[1:, 1])[:-1]
+    width = (V[1:, 0] - V[:-1, 0])[1:]
+
+    hvc = height * width
+    return hvc
+
+
+def hv_2d_fast(ref_point, F_sorted):
+    V = np.vstack([ref_point, F_sorted])
+    height = (V[:-1, 1] - V[1:, 1])
+    width = ref_point[0] - V[1:, 0]
+    return (height * width).sum()
+
+
+class ExactHypervolume2D(ExactHypervolume):
+
+    def __init__(self, ref_point, **kwargs) -> None:
+        assert len(ref_point) == 2, "This hypervolume calculation only works in 2 dimensions."
+        super().__init__(ref_point, func_hv=hv_2d_fast, func_hvc=hvc_2d_fast, **kwargs)
+        self.S = None
+        self.I = None
+
+    def _calc(self, ref_point, F):
+        if len(F) == 0:
+            self.I, self.S = [], []
+            return 0.0, np.zeros(0)
+
+        F = np.minimum(self.ref_point, self.F)
+        I = np.lexsort((-F[:, 1], F[:, 0]))
+        S = np.argsort(I)
+
+        hv, hvc = super()._calc(ref_point, F[I])
+        hvc = hvc[S]
+        self.I, self.S = I, S
+
+        return hv, hvc
+
+    # this very efficiently just recomputes hvc of the points necessary
+    # however has not shown to be much faster because of reindexing
+    # def delete(self, k):
+    #     assert k < len(self.F)
+    #
+    #     F, I, S, hv, hvc = self.F, self.I, self.S, self.hv, self.hvc
+    #
+    #     hv -= hvc[k]
+    #
+    #     i = S[k]
+    #
+    #     S = np.delete(S, k, axis=0)
+    #     S[S > i] -= 1
+    #
+    #     I = np.delete(I, i, axis=0)
+    #     I[I > k] -= 1
+    #
+    #     F = np.delete(F, k, axis=0)
+    #     hvc = np.delete(self.hvc, k, axis=0)
+    #
+    #     v = [I[i] if 0 <= i < len(I) else None for i in np.arange(i-2, i+2)]
+    #     left, middle, right = v[0], v[1:-1], v[-1]
+    #
+    #     middle = [e for e in middle if e is not None]
+    #
+    #     if len(middle) > 0:
+    #
+    #         hvc[middle] = hvc_2d_fast(self.ref_point,
+    #                                   F[middle],
+    #                                   left=F[left] if left is not None else None,
+    #                                   right=F[right] if right is not None else None,
+    #                                   )
+    #
+    #     self.F, self.I, self.S, self.hv, self.hvc = F, I, S, hv, hvc
pymoo/indicators/igd.py
ADDED
pymoo/indicators/kktpm.py
ADDED
@@ -0,0 +1,151 @@
+import numpy as np
+
+from pymoo.core.individual import calc_cv
+
+
+class KKTPM:
+
+    def calc(self, X, problem, ideal=None, utopian_eps=1e-4, rho=1e-3):
+        """
+
+        Returns the Karush-Kuhn-Tucker Approximate Measure.
+
+        Parameters
+        ----------
+        X : np.array
+
+        problem : pymoo.core.problem
+        ideal : np.array
+            The ideal point if not in the problem defined or intentionally overwritten.
+        utopian_eps : float
+            The epsilon used for decrease the ideal point to get the utopian point.
+        rho : float
+            Since augmented achievement scalarization function is used the F for all other weights
+            - here rho - needs to be defined.
+
+        Returns
+        -------
+
+        """
+
+        # the final result to be returned
+        kktpm = np.full((X.shape[0], 1), np.inf)
+        fval = np.full((X.shape[0], 1), np.inf)
+
+        # set the ideal point for normalization
+        z = ideal
+
+        # if not provided take the one defined in the problem
+        if z is None:
+            z = problem.ideal_point()
+        z -= utopian_eps
+
+        # for convenience get the counts directly
+        n_solutions, n_var, n_obj, n_ieq_constr = X.shape[0], problem.n_var, problem.n_obj, problem.n_ieq_constr
+
+        F, G, dF, dG = problem.evaluate(X, return_values_of=["F", "G", "dF", "dG"])
+        CV = calc_cv(G=G)
+
+        # loop through each solution to be considered
+        for i in range(n_solutions):
+
+            # get the corresponding values for this solution
+            x, f, cv, df = X[i, :], F[i, :], CV[i], dF[i, :].swapaxes(1, 0)
+            if n_ieq_constr > 0:
+                g, dg = G[i, :], dG[i].T
+
+            # if the solution that is provided is infeasible
+            if cv > 0:
+                _kktpm = 1 + cv
+                _fval = None
+
+            else:
+
+                w = np.sqrt(np.sum(np.power(f - z, 2))) / (f - z)
+                a_m = (df * w + (rho * np.sum(df * w, axis=1))[:, None]).T
+
+                A = np.ones((problem.n_obj, problem.n_obj)) + a_m @ a_m.T
+                b = np.ones(problem.n_obj)
+
+                if n_ieq_constr > 0:
+                    # a_j is just the transpose of the differential of constraints
+                    a_j = dg.T
+
+                    # part of the matrix for additional constraints
+                    gsq = np.zeros((n_ieq_constr, n_ieq_constr))
+                    np.fill_diagonal(gsq, g * g)
+
+                    # now add the constraints to the optimization problem
+                    A = np.vstack([np.hstack([A, a_m @ a_j.T]), np.hstack([a_j @ a_m.T, a_j @ a_j.T + gsq])])
+                    b = np.hstack([b, np.zeros(n_ieq_constr)])
+
+                method = "qr"
+                u = solve(A, b, method=method)
+
+                # until all the lagrange multiplier are positive
+                while np.any(u < 0):
+
+                    # go through one by one
+                    for j in range(len(u)):
+
+                        # if a lagrange multiplier is negative - we need to fix it
+                        if u[j] < 0:
+                            # modify the optimization problem
+                            A[j, :], A[:, j], A[j, j] = 0, 0, 1
+                            b[j] = 0
+
+                    # resolve the problem and redefine u. for sure all preview u[j] are positive now
+                    u = solve(A, b, method=method)
+
+                # split up the lagrange multiplier for objective and not
+                u_m, u_j = u[:n_obj], u[n_obj:]
+
+                if n_ieq_constr > 0:
+                    _kktpm = (1 - np.sum(u_m)) ** 2 + np.sum((np.vstack([a_m, a_j]).T @ u) ** 2)
+                    _fval = _kktpm + np.sum((u_j * g.T) ** 2)
+                else:
+                    _kktpm = (1 - np.sum(u_m)) ** 2 + np.sum((a_m.T @ u) ** 2)
+                    _fval = _kktpm
+
+                ujgj = -g @ u_j
+                if np.sum(u_m) + ujgj * (1 + ujgj) > 1:
+                    adjusted_kktpm = - (u_j @ g.T)
+                    projected_kktpm = (_kktpm * g @ g.T - g @ u_j) / (1 + g @ g.T)
+                    _kktpm = (_kktpm + adjusted_kktpm + projected_kktpm) / 3
+
+            # assign to the values to be returned
+            kktpm[i] = _kktpm
+            fval[i] = _fval
+
+        return kktpm[:, 0]
+
+
+def solve(A, b, method="elim"):
+    if method == "elim":
+        return np.linalg.solve(A, b)
+
+    elif method == "qr":
+        Q, R = np.linalg.qr(A)
+        y = np.dot(Q.T, b)
+        return np.linalg.solve(R, y)
+
+    elif method == "svd":
+        U, s, V = np.linalg.svd(A)  # SVD decomposition of A
+        A_inv = np.dot(np.dot(V.T, np.linalg.inv(np.diag(s))), U.T)
+        return A_inv @ b
+
+
+if __name__ == '__main__':
+    from pymoo.problems import get_problem
+    from pymoo.gradient.automatic import AutomaticDifferentiation
+
+    from pymoo.constraints.from_bounds import ConstraintsFromBounds
+    problem = ConstraintsFromBounds(AutomaticDifferentiation(get_problem("zdt2", n_var=30)))
+
+    # X = (0.5 * np.ones(10))[None, :]
+    X = np.array(
+        [0.394876, 0.963263, 0.173956, 0.126330, 0.135079, 0.505662, 0.021525, 0.947970, 0.827115, 0.015019, 0.176196,
+         0.332064, 0.130997, 0.809491, 0.344737, 0.940107, 0.582014, 0.878832, 0.844734, 0.905392, 0.459880, 0.546347,
+         0.798604, 0.285719, 0.490254, 0.599110, 0.015533, 0.593481, 0.433676, 0.807361])
+
+    print(KKTPM().calc(X[None, :], problem))
pymoo/indicators/migd.py
ADDED
@@ -0,0 +1,55 @@
+import numpy as np
+
+from pymoo.core.callback import Callback
+from pymoo.indicators.igd import IGD
+
+
+class MIGD(Callback):
+
+    def __init__(self, reevaluate=True) -> None:
+        """
+        Mean Inverted Generational Distance (MIGD)
+
+        For dynamic optimization problems the performance metric needs to involve the IGD value over time as the
+        problem is changing. Thus, the performance needs to be evaluated in each iteration for which
+        defining a callback is ideal.
+
+        """
+
+        super().__init__()
+
+        # whether the MIGD should be based on reevaluated solutions
+        self.reevaluate = reevaluate
+
+        # the list where each of the recordings are stored: timesteps and igd
+        self.records = []
+
+    def update(self, algorithm, **kwargs):
+
+        # the problem to be solved
+        problem = algorithm.problem
+        assert problem.n_constr == 0, "The current implementation only works for unconstrained problems!"
+
+        # the current time
+        t = problem.time
+
+        # the current pareto-front of the problem (at the specific time step)
+        pf = problem.pareto_front()
+
+        # the current population of the algorithm
+        pop = algorithm.pop
+
+        # if the callback should reevaluate to match the current time step and avoid deprecated values
+        if self.reevaluate:
+            X = pop.get("X")
+            F = problem.evaluate(X, return_values_of=["F"])
+        else:
+            F = pop.get("F")
+
+        # calculate the current igd values
+        igd = IGD(pf).do(F)
+
+        self.records.append((t, igd))
+
+    def value(self):
+        return np.array([igd for _, igd in self.records]).mean()
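Since MIGD is a Callback, the intended usage is to pass it to minimize and query it after the run. The sketch below follows the dynamic-optimization examples in the pymoo documentation; DF1, DNSGA2, TimeSimulation and CallbackCollection (and their parameters) are assumptions about the surrounding API rather than part of this file:

# assumed imports, mirroring the pymoo dynamic-optimization docs
from pymoo.algorithms.moo.dnsga2 import DNSGA2
from pymoo.core.callback import CallbackCollection
from pymoo.indicators.migd import MIGD
from pymoo.optimize import minimize
from pymoo.problems.dyn import TimeSimulation
from pymoo.problems.dynamic.df import DF1

# a dynamic benchmark; taut/nt control how quickly the front changes (assumed parameters)
problem = DF1(taut=2, nt=10)

# records a (time, IGD) pair in every iteration via update()
migd = MIGD()

res = minimize(problem,
               DNSGA2(),
               termination=("n_gen", 100),
               # TimeSimulation advances the problem time each generation
               callback=CallbackCollection(migd, TimeSimulation()),
               seed=1)

# mean IGD over all recorded time steps
print(migd.value())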
pymoo/indicators/rmetric.py
ADDED
@@ -0,0 +1,203 @@
+import numpy as np
+from scipy.spatial.distance import cdist
+
+from pymoo.core.indicator import Indicator
+from pymoo.indicators.hv import Hypervolume
+from pymoo.indicators.igd import IGD
+
+
+class RMetric(Indicator):
+
+    def __init__(self, problem, ref_points, w=None, delta=0.2, pf=None):
+        """
+
+        Parameters
+        ----------
+
+        problem : class
+            problem instance
+
+        ref_points : numpy.array
+            list of reference points
+
+        w : numpy.array
+            weights for each objective
+
+        delta : float
+            The delta value representing the region of interest
+
+        """
+
+        Indicator.__init__(self)
+        self.ref_points = ref_points
+        self.problem = problem
+        w_ = np.ones(self.ref_points.shape[1]) if not w else w
+        self.w_points = self.ref_points + 2 * w_
+        self.delta = delta
+
+        self.pf = pf
+        self.F = None
+        self.others = None
+
+    def _filter(self):
+
+        def check_dominance(a, b, n_obj):
+            flag1 = False
+            flag2 = False
+            for i in range(n_obj):
+                if a[i] < b[i]:
+                    flag1 = True
+                else:
+                    if a[i] > b[i]:
+                        flag2 = True
+            if flag1 and not flag2:
+                return 1
+            elif not flag1 and flag2:
+                return -1
+            else:
+                return 0
+
+        num_objs = np.size(self.F, axis=1)
+        index_array = np.zeros(np.size(self.F, axis=0))
+
+        # filter out all solutions that are dominated by solutions found by other algorithms
+        if self.others is not None:
+            for i in range(np.size(self.F, 0)):
+                for j in range(np.size(self.others, 0)):
+                    flag = check_dominance(self.F[i, :], self.others[j, :], num_objs)
+                    if flag == -1:
+                        index_array[i] = 1
+                        break
+
+        final_index = np.logical_not(index_array)
+        filtered_pop = self.F[final_index, :]
+
+        return filtered_pop
+
+    def _preprocess(self, data, ref_point, w_point):
+
+        datasize = np.size(data, 0)
+
+        # Identify representative point
+        ref_matrix = np.tile(ref_point, (datasize, 1))
+        w_matrix = np.tile(w_point, (datasize, 1))
+        # ratio of distance to the ref point over the distance between the w_point and the ref_point
+        diff_matrix = (data - ref_matrix) / (w_matrix - ref_matrix)
+        agg_value = np.amax(diff_matrix, axis=1)
+        idx = np.argmin(agg_value)
+        zp = [data[idx, :]]
+
+        return zp,
+
+    def _translate(self, zp, trimmed_data, ref_point, w_point):
+        # Solution translation - Matlab reproduction
+        # find k
+        temp = ((zp[0] - ref_point) / (w_point - ref_point))
+        kIdx = np.argmax(temp)
+
+        # find zl
+        temp = (zp[0][kIdx] - ref_point[kIdx]) / (w_point[kIdx] - ref_point[kIdx])
+        zl = ref_point + temp * (w_point - ref_point)
+
+        temp = zl - zp
+        shift_direction = np.tile(temp, (trimmed_data.shape[0], 1))
+        # new_size = self.curr_pop.shape[0]
+        return trimmed_data + shift_direction
+
+    def _trim(self, pop, centeroid, range=0.2):
+        popsize, objDim = pop.shape
+        diff_matrix = pop - np.tile(centeroid, (popsize, 1))[0]
+        flags = np.sum(abs(diff_matrix) < range / 2, axis=1)
+        filtered_matrix = pop[np.where(flags == objDim)]
+        return filtered_matrix
+
+    def _trim_fast(self, pop, centeroid, range=0.2):
+        centeroid_matrix = cdist(pop, centeroid, metric='euclidean')
+        filtered_matrix = pop[np.where(centeroid_matrix < range / 2), :][0]
+        return filtered_matrix
+
+    def do(self, F, others=None, calc_hv=True):
+        """
+
+        This method calculates the R-IGD and R-HV based off of the values provided.
+
+
+        Parameters
+        ----------
+
+        F : numpy.ndarray
+            The objective space values
+
+        others : numpy.ndarray
+            Results from other algorithms which should be used for filtering nds solutions
+
+        calc_hv : bool
+            Whether the hv is calculate - (None if more than 3 dimensions)
+
+
+        Returns
+        -------
+        rigd : float
+            R-IGD
+
+        rhv : float
+            R-HV if calc_hv is true and less or equal to 3 dimensions
+
+        """
+        self.F, self.others = F, others
+
+        translated = []
+        final_PF = []
+
+        # 1. Prescreen Procedure - NDS Filtering
+        pop = self._filter()
+
+        pf = self.pf
+        if pf is None:
+            pf = self.problem.pareto_front()
+
+        if pf is None:
+            raise Exception("Please provide the Pareto front to calculate the R-Metric!")
+
+        labels = np.argmin(cdist(pop, self.ref_points), axis=1)
+
+        for i in range(len(self.ref_points)):
+            cluster = pop[np.where(labels == i)]
+            if len(cluster) != 0:
+                # 2. Representative Point Identification
+                zp = self._preprocess(cluster, self.ref_points[i], w_point=self.w_points[i])[0]
+                # 3. Filtering Procedure - Filter points
+                trimmed_data = self._trim(cluster, zp, range=self.delta)
+                # 4. Solution Translation
+                pop_t = self._translate(zp, trimmed_data, self.ref_points[i], w_point=self.w_points[i])
+                translated.extend(pop_t)
+
+                # 5. R-Metric Computation
+                target = self._preprocess(data=pf, ref_point=self.ref_points[i], w_point=self.w_points[i])
+                PF = self._trim(pf, target)
+                final_PF.extend(PF)
+
+        translated = np.array(translated)
+        final_PF = np.array(final_PF)
+
+        rigd, rhv = None, None
+
+        if len(translated) > 0:
+
+            # IGD Computation
+            rigd = IGD(final_PF).do(translated)
+
+            nadir_point = np.amax(self.w_points, axis=0)
+            front = translated
+            dim = self.ref_points[0].shape[0]
+            if calc_hv:
+                if dim <= 3:
+                    try:
+                        rhv = Hypervolume(ref_point=nadir_point).do(front)
+                    except:
+                        pass
+
+        if calc_hv:
+            return rigd, rhv
+        else:
+            return rigd
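A minimal usage sketch for the R-metric; ZDT1, the NSGA-II run and the single reference point are illustrative choices. With calc_hv enabled, do() returns the pair (R-IGD, R-HV):

import numpy as np

from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.indicators.rmetric import RMetric
from pymoo.optimize import minimize
from pymoo.problems import get_problem

problem = get_problem("zdt1")

# obtain a non-dominated set to be assessed (illustrative run)
res = minimize(problem, NSGA2(), ("n_gen", 100), seed=1)

# preference information: one reference point in objective space
ref_points = np.array([[0.5, 0.5]])

rmetric = RMetric(problem, ref_points, pf=problem.pareto_front())
rigd, rhv = rmetric.do(res.F)
print(rigd, rhv)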
pymoo/indicators/spacing.py
ADDED
@@ -0,0 +1,52 @@
+import numpy as np
+from pymoo.core.indicator import Indicator
+from pymoo.indicators.distance_indicator import at_least_2d_array, derive_ideal_and_nadir_from_pf
+from scipy.spatial.distance import pdist, squareform
+
+
+class SpacingIndicator(Indicator):
+
+    def __init__(self,
+                 pf=None,
+                 zero_to_one=False,
+                 ideal=None,
+                 nadir=None):
+        """Spacing indicator
+
+        The smaller the value this indicator assumes, the most uniform is the distribution of elements on the pareto front.
+
+        Parameters
+        ----------
+        pf : 2d array, optional
+            Pareto front, by default None
+        zero_to_one : bool, optional
+            Whether or not the objective values should be normalized in calculations, by default False
+        ideal : 1d array, optional
+            Ideal point, by default None
+        nadir : 1d array, optional
+            Nadir point, by default None
+        """
+
+        # the pareto front if necessary to calculate the indicator
+        pf = at_least_2d_array(pf, extend_as="row")
+        ideal, nadir = derive_ideal_and_nadir_from_pf(pf, ideal=ideal, nadir=nadir)
+
+        super().__init__(pf=pf,
+                         zero_to_one=zero_to_one,
+                         ideal=ideal,
+                         nadir=nadir)
+
+    def _do(self, F, *args, **kwargs):
+
+        # Get F dimensions
+        n_points, n_obj = F.shape
+
+        # knn
+        D = squareform(pdist(F, metric="cityblock"))
+        d = np.partition(D, 1, axis=1)[:, 1]
+        dm = np.mean(d)
+
+        # Get spacing
+        S = np.sqrt(np.sum(np.square(d - dm)) / n_points)
+
+        return S
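A short sketch of the spacing indicator on an arbitrary bi-objective front; the points are illustrative, and the set itself is reused to derive ideal and nadir purely for demonstration. Lower values indicate a more uniform distribution:

import numpy as np

from pymoo.indicators.spacing import SpacingIndicator

# an illustrative, mutually non-dominated set of objective vectors
F = np.array([[0.0, 1.0],
              [0.2, 0.7],
              [0.5, 0.5],
              [0.7, 0.2],
              [1.0, 0.0]])

# raw objective space (zero_to_one=False is the default)
print(SpacingIndicator(pf=F).do(F))

# normalized variant; ideal and nadir are derived from the given front
print(SpacingIndicator(pf=F, zero_to_one=True).do(F))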
pymoo/mcdm/__init__.py
ADDED
File without changes
pymoo/mcdm/compromise_programming.py
ADDED
@@ -0,0 +1,19 @@
+from pymoo.core.decision_making import DecisionMaking
+from pymoo.util.normalization import normalize
+
+
+class CompromiseProgramming(DecisionMaking):
+
+    def __init__(self, metric="euclidean", **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.metric = metric
+
+    def _do(self, F, **kwargs):
+
+        F, _, ideal_point, nadir_point = normalize(F,
+                                                   xl=self.ideal_point,
+                                                   xu=self.nadir_point,
+                                                   estimate_bounds_if_none=True,
+                                                   return_bounds=True)
+
+        return None
pymoo/mcdm/high_tradeoff.py
ADDED
@@ -0,0 +1,40 @@
+import warnings
+
+import numpy as np
+
+from pymoo.core.decision_making import DecisionMaking, find_outliers_upper_tail, NeighborFinder
+
+
+class HighTradeoffPoints(DecisionMaking):
+
+    def __init__(self, epsilon=0.125, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.epsilon = epsilon
+
+    def _do(self, F, **kwargs):
+        n, m = F.shape
+
+        neighbors_finder = NeighborFinder(F, epsilon=0.125, n_min_neigbors="auto", consider_2d=False)
+
+        mu = np.full(n, - np.inf)
+
+        # for each solution in the set calculate the least amount of improvement per unit deterioration
+        for i in range(n):
+
+            # for each neighbour in a specific radius of that solution
+            neighbors = neighbors_finder.find(i)
+
+            # calculate the trade-off to all neighbours
+            diff = F[neighbors] - F[i]
+
+            # calculate sacrifice and gain
+            sacrifice = np.maximum(0, diff).sum(axis=1)
+            gain = np.maximum(0, -diff).sum(axis=1)
+
+            warnings.filterwarnings('ignore')
+            tradeoff = sacrifice / gain
+
+            # otherwise find the one with the smalled one
+            mu[i] = np.nanmin(tradeoff)
+
+        return find_outliers_upper_tail(mu)
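A sketch of knee-point selection with HighTradeoffPoints; the dense synthetic front below is illustrative (convex, with strong curvature near the left end), and do() returns the indices of the solutions flagged as high trade-off points:

import numpy as np

from pymoo.mcdm.high_tradeoff import HighTradeoffPoints

# a dense, mutually non-dominated front: f1 = x, f2 = (1 - x)^2
x = np.linspace(0, 1, 200)
F = np.column_stack([x, (1 - x) ** 2])

dm = HighTradeoffPoints()

# indices of the solutions considered high trade-off points (may be empty)
I = dm.do(F)
print(I, F[I])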
pymoo/mcdm/pseudo_weights.py
ADDED
@@ -0,0 +1,32 @@
+import numpy as np
+
+from pymoo.core.decision_making import DecisionMaking
+
+
+class PseudoWeights(DecisionMaking):
+
+    def __init__(self, weights, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.weights = weights
+
+    def _do(self, F, return_pseudo_weights=False, **kwargs):
+        ideal, nadir = self.ideal, self.nadir
+
+        if ideal is None:
+            ideal = F.min(axis=0)
+        if nadir is None:
+            nadir = F.max(axis=0)
+
+        # normalized distance to the worst solution
+        pseudo_weights = ((nadir - F) / (nadir - ideal))
+
+        # normalize weights to sum up to one
+        pseudo_weights = pseudo_weights / np.sum(pseudo_weights, axis=1)[:, None]
+
+        # search for the closest individual having this pseudo weights
+        I = np.argmin(np.sum(np.abs(pseudo_weights - self.weights), axis=1))
+
+        if return_pseudo_weights:
+            return I, pseudo_weights
+        else:
+            return I
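Finally, a minimal sketch of compromise selection with PseudoWeights; the front and the weight vector are illustrative, and do() returns the index of the solution whose pseudo-weight vector is closest to the given preference:

import numpy as np

from pymoo.mcdm.pseudo_weights import PseudoWeights

# an illustrative non-dominated front: f1 = x, f2 = 1 - sqrt(x)
x = np.linspace(0, 1, 50)
F = np.column_stack([x, 1 - np.sqrt(x)])

# preference: the first objective is weighted twice as much as the second
weights = np.array([2.0, 1.0]) / 3.0

i = PseudoWeights(weights).do(F)
print(i, F[i])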